Metadata
The ONNX format contains metadata about how a model was produced. When a model is deployed to production, this metadata is useful for tracking which instance was in use at a given time. Let's see how to inspect it with a simple logistic regression model trained with *scikit-learn*.
import skl2onnx
import onnxruntime
import sklearn
import numpy
from onnxruntime import InferenceSession
import onnx
from onnxruntime.datasets import get_example

# Load a sample logistic regression model shipped with onnxruntime.
example = get_example("logreg_iris.onnx")
model = onnx.load(example)
print("doc_string={}".format(model.doc_string))
print("domain={}".format(model.domain))
print("ir_version={}".format(model.ir_version))
print("metadata_props={}".format(model.metadata_props))
print("model_version={}".format(model.model_version))
print("producer_name={}".format(model.producer_name))
print("producer_version={}".format(model.producer_version))
doc_string=
domain=onnxml
ir_version=3
metadata_props=[]
model_version=0
producer_name=OnnxMLTools
producer_version=1.2.0.0116
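The metadata_props list is empty here because the converter did not attach any custom entries. Such fields can be filled in before saving the model. Below is a minimal sketch using onnx.helper.set_model_props; the keys training_date and author are illustrative, not required by ONNX.

import onnx
from onnx import helper
from onnxruntime.datasets import get_example

model = onnx.load(get_example("logreg_iris.onnx"))

# doc_string and model_version are plain fields on ModelProto.
model.doc_string = "Logistic regression trained on the Iris dataset."
model.model_version = 1

# metadata_props stores arbitrary string key/value pairs;
# set_model_props replaces the whole list with the given dictionary.
helper.set_model_props(model, {"training_date": "2023-01-01", "author": "example"})

onnx.save(model, "logreg_iris_with_metadata.onnx")

These custom entries are what *ONNX Runtime* exposes as custom_metadata_map below.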
With *ONNX Runtime*:
sess = InferenceSession(example, providers=["CPUExecutionProvider"])
meta = sess.get_modelmeta()
print("custom_metadata_map={}".format(meta.custom_metadata_map))
print("description={}".format(meta.description))
print("domain={}".format(meta.domain))
print("graph_name={}".format(meta.graph_name))
print("producer_name={}".format(meta.producer_name))
print("version={}".format(meta.version))
Note that since *onnxruntime* 1.9, the providers argument is mandatory. Calling InferenceSession(example) without it fails with: ValueError: This ORT build has ['CUDAExecutionProvider', 'CPUExecutionProvider'] enabled. Since ORT 1.9, you are required to explicitly set the providers parameter when instantiating InferenceSession. For example, onnxruntime.InferenceSession(..., providers=['CUDAExecutionProvider', 'CPUExecutionProvider'], ...)
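Rather than hard-coding the provider list, a build-agnostic option is to query the providers available in the current installation with onnxruntime.get_available_providers(). A minimal sketch:

import onnxruntime
from onnxruntime import InferenceSession
from onnxruntime.datasets import get_example

# Returns, e.g., ['CUDAExecutionProvider', 'CPUExecutionProvider'] on a
# CUDA-enabled build, or ['CPUExecutionProvider'] on a CPU-only build.
available = onnxruntime.get_available_providers()

# Passing the full list lets ONNX Runtime fall back to CPU when CUDA is absent.
sess = InferenceSession(get_example("logreg_iris.onnx"), providers=available)
print(sess.get_modelmeta().graph_name)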
Versions used for this example:
print("numpy:", numpy.__version__)
print("scikit-learn:", sklearn.__version__)
print("onnx: ", onnx.__version__)
print("onnxruntime: ", onnxruntime.__version__)
print("skl2onnx: ", skl2onnx.__version__)
Total running time of the script: (0 minutes 0.008 seconds)