Python API Overview¶
The full API is described in the API Reference.
Loading an ONNX Model¶
import onnx
# onnx_model is an in-memory ModelProto
onnx_model = onnx.load("path/to/the/model.onnx")
Runnable IPython notebook
Loading an ONNX Model with External Data¶
[Default] If the external data is in the same directory as the model, simply use onnx.load().
import onnx
onnx_model = onnx.load("path/to/the/model.onnx")
If the external data is in another directory, use load_external_data_for_model() to specify the directory path, and load it after calling onnx.load().
import onnx
from onnx.external_data_helper import load_external_data_for_model
onnx_model = onnx.load("path/to/the/model.onnx", load_external_data=False)
load_external_data_for_model(onnx_model, "data/directory/path/")
# Then the onnx_model has loaded the external data from the specific directory
Converting an ONNX Model to External Data¶
from onnx.external_data_helper import convert_model_to_external_data
# onnx_model is an in-memory ModelProto
onnx_model = ...
convert_model_to_external_data(onnx_model, all_tensors_to_one_file=True, location="filename", size_threshold=1024, convert_attribute=False)
# Then the onnx_model has converted raw data as external data
# Must be followed by save
Saving an ONNX Model¶
import onnx
# onnx_model is an in-memory ModelProto
onnx_model = ...
# Save the ONNX model
onnx.save(onnx_model, "path/to/the/model.onnx")
Runnable IPython notebook
Converting an ONNX Model to External Data and Saving It¶
import onnx
# onnx_model is an in-memory ModelProto
onnx_model = ...
onnx.save_model(onnx_model, "path/to/save/the/model.onnx", save_as_external_data=True, all_tensors_to_one_file=True, location="filename", size_threshold=1024, convert_attribute=False)
# Then the onnx_model has converted raw data as external data and saved to specific directory
Manipulating TensorProto and Numpy Arrays¶
import numpy
import onnx
from onnx import numpy_helper
# Preprocessing: create a Numpy array
numpy_array = numpy.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=float)
print(f"Original Numpy array:\n{numpy_array}\n")
# Convert the Numpy array to a TensorProto
tensor = numpy_helper.from_array(numpy_array)
print(f"TensorProto:\n{tensor}")
# Convert the TensorProto to a Numpy array
new_array = numpy_helper.to_array(tensor)
print(f"After round trip, Numpy array:\n{new_array}\n")
# Save the TensorProto
with open("tensor.pb", "wb") as f:
f.write(tensor.SerializeToString())
# Load a TensorProto
new_tensor = onnx.TensorProto()
with open("tensor.pb", "rb") as f:
new_tensor.ParseFromString(f.read())
print(f"After saving and loading, new TensorProto:\n{new_tensor}")
from onnx import TensorProto, helper
# Conversion utilities for mapping attributes in ONNX IR
# The functions below are available after ONNX 1.13
np_dtype = helper.tensor_dtype_to_np_dtype(TensorProto.FLOAT)
print(f"The converted numpy dtype for {helper.tensor_dtype_to_string(TensorProto.FLOAT)} is {np_dtype}.")
storage_dtype = helper.tensor_dtype_to_storage_tensor_dtype(TensorProto.FLOAT)
print(f"The storage dtype for {helper.tensor_dtype_to_string(TensorProto.FLOAT)} is {helper.tensor_dtype_to_string(storage_dtype)}.")
field_name = helper.tensor_dtype_to_field(TensorProto.FLOAT)
print(f"The field name for {helper.tensor_dtype_to_string(TensorProto.FLOAT)} is {field_name}.")
tensor_dtype = helper.np_dtype_to_tensor_dtype(np_dtype)
print(f"The tensor data type for numpy dtype: {np_dtype} is {helper.tensor_dtype_to_string(tensor_dtype)}.")
for tensor_dtype in helper.get_all_tensor_dtypes():
    print(helper.tensor_dtype_to_string(tensor_dtype))
Runnable IPython notebook
Creating an ONNX Model Using Helper Functions¶
import onnx
from onnx import helper
from onnx import TensorProto, GraphProto
# The protobuf definition can be found here:
# https://github.com/onnx/onnx/blob/main/onnx/onnx.proto
# Create one input (ValueInfoProto)
X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [3, 2])
pads = helper.make_tensor_value_info("pads", TensorProto.INT64, [4])
value = helper.make_tensor_value_info("value", TensorProto.FLOAT, [1])
# Create one output (ValueInfoProto)
Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [3, 4])
# Create a node (NodeProto) - This is based on Pad-11
node_def = helper.make_node(
    "Pad",                   # op type
    ["X", "pads", "value"],  # inputs
    ["Y"],                   # outputs
    mode="constant",         # attributes
)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
    [node_def],        # nodes
    "test-model",      # name
    [X, pads, value],  # inputs
    [Y],               # outputs
)
# Create the model (ModelProto)
model_def = helper.make_model(graph_def, producer_name="onnx-example")
print(f"The model is:\n{model_def}")
onnx.checker.check_model(model_def)
print("The model is checked!")
Runnable IPython notebook
Conversion Utilities for Mapping Attributes in ONNX IR¶
from onnx import TensorProto, helper
np_dtype = helper.tensor_dtype_to_np_dtype(TensorProto.FLOAT)
print(f"The converted numpy dtype for {helper.tensor_dtype_to_string(TensorProto.FLOAT)} is {np_dtype}.")
field_name = helper.tensor_dtype_to_field(TensorProto.FLOAT)
print(f"The field name for {helper.tensor_dtype_to_string(TensorProto.FLOAT)} is {field_name}.")
# There are other useful conversion utilities. Please check onnx.helper for more.
Checking an ONNX Model¶
import onnx
# Preprocessing: load the ONNX model
model_path = "path/to/the/model.onnx"
onnx_model = onnx.load(model_path)
print(f"The model is:\n{onnx_model}")
# Check the model
try:
    onnx.checker.check_model(onnx_model)
except onnx.checker.ValidationError as e:
    print(f"The model is invalid: {e}")
else:
    print("The model is valid!")
Runnable IPython notebook
Checking a Large ONNX Model (>2GB)¶
The current checker supports checking models with external data, but for models larger than 2GB, please pass the model path to onnx.checker; the external data must be under the same directory as the model.
import onnx
onnx.checker.check_model("path/to/the/model.onnx")
# onnx.checker.check_model(loaded_onnx_model) will fail if given >2GB model
Running Shape Inference on an ONNX Model¶
import onnx
from onnx import helper, shape_inference
from onnx import TensorProto
# Preprocessing: create a model with two nodes, Y's shape is unknown
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])
node2 = helper.make_node("Transpose", ["Y"], ["Z"], perm=[1, 0, 2])
graph = helper.make_graph(
    [node1, node2],
    "two-transposes",
    [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4))],
    [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (2, 3, 4))],
)
original_model = helper.make_model(graph, producer_name="onnx-examples")
# Check the model and print Y's shape information
onnx.checker.check_model(original_model)
print(f"Before shape inference, the shape info of Y is:\n{original_model.graph.value_info}")
# Apply shape inference on the model
inferred_model = shape_inference.infer_shapes(original_model)
# Check the model and print Y's shape information
onnx.checker.check_model(inferred_model)
print(f"After shape inference, the shape info of Y is:\n{inferred_model.graph.value_info}")
Runnable IPython notebook
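As a follow-up, here is a minimal sketch of reading the inferred shape programmatically instead of printing the whole value_info. It assumes Y is the only intermediate value in the two-transposes model above, so it is the single value_info entry.
# inferred_model comes from the shape inference example above
y_info = inferred_model.graph.value_info[0]
dims = [d.dim_value for d in y_info.type.tensor_type.shape.dim]
print(dims)  # [3, 2, 4]: X of shape (2, 3, 4) transposed with perm=[1, 0, 2]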
Shape Inference on a Large ONNX Model (>2GB)¶
The current shape inference supports models with external data, but for models larger than 2GB, please pass the model path to onnx.shape_inference.infer_shapes_path; the external data must be under the same directory as the model. You can specify an output path for saving the inferred model; otherwise, the default output path is the same as the original model path.
import onnx
# output the inferred model to the original model path
onnx.shape_inference.infer_shapes_path("path/to/the/model.onnx")
# output the inferred model to the specified model path
onnx.shape_inference.infer_shapes_path("path/to/the/model.onnx", "output/inferred/model.onnx")
# inferred_model = onnx.shape_inference.infer_shapes(loaded_onnx_model) will fail if given >2GB model
Running Type Inference on an ONNX Function¶
import onnx
import onnx.helper
import onnx.parser
import onnx.shape_inference
function_text = """
<opset_import: [ "" : 18 ], domain: "local">
CastTo <dtype> (x) => (y) {
    y = Cast <to : int = @dtype> (x)
}
"""
function = onnx.parser.parse_function(function_text)
# The function above has one input-parameter x, and one attribute-parameter dtype.
# To apply type-and-shape-inference to this function, we must supply the type of
# input-parameter and an attribute value for the attribute-parameter as below:
float_type_ = onnx.helper.make_tensor_type_proto(1, None)
dtype_6 = onnx.helper.make_attribute("dtype", 6)
result = onnx.shape_inference.infer_function_output_types(
    function, [float_type_], [dtype_6]
)
print(result) # a list containing the (single) output type
Converting the Version of an ONNX Model within the Default Domain ("" / "ai.onnx")¶
import onnx
from onnx import version_converter, helper
# Preprocessing: load the model to be converted.
model_path = "path/to/the/model.onnx"
original_model = onnx.load(model_path)
print(f"The model before conversion:\n{original_model}")
# A full list of supported adapters can be found here:
# https://github.com/onnx/onnx/blob/main/onnx/version_converter.py#L21
# Apply the version conversion on the original model
converted_model = version_converter.convert_version(original_model, <int target_version>)
print(f"The model after conversion:\n{converted_model}")
Utility Functions¶
Extracting a Sub-model with Input/Output Tensor Names¶
The function extract_model() extracts a sub-model from an ONNX model. The sub-model is defined *entirely* by the names of its input and output tensors.
import onnx
input_path = "path/to/the/original/model.onnx"
output_path = "path/to/save/the/extracted/model.onnx"
input_names = ["input_0", "input_1", "input_2"]
output_names = ["output_0", "output_1"]
onnx.utils.extract_model(input_path, output_path, input_names, output_names)
Note: For control-flow operators, e.g. If and Loop, the *boundary of the sub-model*, defined by the input and output tensors, should not *cut through* a subgraph that is connected to the *main graph* as an attribute of these operators.
ONNX Compose¶
The onnx.compose module provides tools to create combined models.
onnx.compose.merge_models can be used to merge two models by connecting some of the outputs of the first model to inputs of the second. By default, inputs/outputs not present in the io_map argument remain as inputs/outputs of the combined model.
In this example, we merge two models by connecting each output of the first model to an input of the second. The resulting model has the same inputs as the first model and the same outputs as the second:
import onnx
model1 = onnx.load("path/to/model1.onnx")
# agraph (float[N] A, float[N] B) => (float[N] C, float[N] D)
# {
# C = Add(A, B)
# D = Sub(A, B)
# }
model2 = onnx.load("path/to/model2.onnx")
# agraph (float[N] X, float[N] Y) => (float[N] Z)
# {
# Z = Mul(X, Y)
# }
combined_model = onnx.compose.merge_models(
    model1, model2,
    io_map=[("C", "X"), ("D", "Y")]
)
In addition, users can specify a list of inputs/outputs to include in the combined model, effectively dropping the parts of the graph that do not contribute to the combined model's outputs. In the following example, we connect only one of the two outputs of the first model to both inputs of the second. By explicitly specifying the combined model's outputs, we drop the unused output of the first model and the related part of the graph:
import onnx
# Default case. Include all outputs in the combined model
combined_model = onnx.compose.merge_models(
    model1, model2,
    io_map=[("C", "X"), ("C", "Y")],
)  # outputs: "D", "Z"
# Explicit outputs. The "D" output and the Sub node are not present in the combined model
combined_model = onnx.compose.merge_models(
    model1, model2,
    io_map=[("C", "X"), ("C", "Y")],
    outputs=["Z"],
)  # outputs: "Z"
onnx.compose.add_prefix allows you to prefix names in a model to avoid name collisions when merging models. By default, it renames all names in the graph: inputs, outputs, edges, nodes, initializers, sparse initializers, and value infos.
import onnx
model = onnx.load("path/to/the/model.onnx")
# model - outputs: ["out0", "out1"], inputs: ["in0", "in1"]
new_model = onnx.compose.add_prefix(model, prefix="m1/")
# new_model - outputs: ["m1/out0", "m1/out1"], inputs: ["m1/in0", "m1/in1"]
# Can also be run in-place
onnx.compose.add_prefix(model, prefix="m1/", inplace=True)
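As a minimal sketch of using add_prefix together with merge_models (reusing model1 and model2 from the merge example above, with outputs C/D and inputs X/Y), prefixing both models before merging keeps their internal names from colliding.
import onnx
model1 = onnx.compose.add_prefix(onnx.load("path/to/model1.onnx"), prefix="m1/")
model2 = onnx.compose.add_prefix(onnx.load("path/to/model2.onnx"), prefix="m2/")
# The io_map must now use the prefixed tensor names
combined_model = onnx.compose.merge_models(
    model1, model2,
    io_map=[("m1/C", "m2/X"), ("m1/D", "m2/Y")],
)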
onnx.compose.expand_out_dim can be used to connect models that expect a different number of dimensions by inserting a dimension with extent one. This is useful when combining a model that produces samples with a model that processes batches of samples.
import onnx
# outputs: "out0", shape=[200, 200, 3]
model1 = onnx.load("path/to/the/model1.onnx")
# outputs: "in0", shape=[N, 200, 200, 3]
model2 = onnx.load("path/to/the/model2.onnx")
# outputs: "out0", shape=[1, 200, 200, 3]
new_model1 = onnx.compose.expand_out_dim(model1, dim_idx=0)
# Models can now be merged
combined_model = onnx.compose.merge_models(
    new_model1, model2, io_map=[("out0", "in0")]
)
# Can also be run in-place
onnx.compose.expand_out_dim(model1, dim_idx=0, inplace=True)
Tools¶
Updating a Model's Input/Output Dimension Sizes with Variable Length¶
The function update_inputs_outputs_dims updates the dimensions of the model's inputs and outputs to the values provided in the arguments. You can provide both static and dynamic dimension sizes by using dim_param. For more information on static and dynamic dimension sizes, check Tensor Shapes.
The function runs the model checker after the input/output sizes are updated.
import onnx
from onnx.tools import update_model_dims
model = onnx.load("path/to/the/model.onnx")
# Here "seq", "batch", and -1 are all dynamic dimensions, expressed using dim_param.
variable_length_model = update_model_dims.update_inputs_outputs_dims(model, {"input_name": ["seq", "batch", 3, -1]}, {"output_name": ["seq", "batch", 1, -1]})
ONNX Parser¶
The functions onnx.parser.parse_model and onnx.parser.parse_graph can be used to create an ONNX model or graph from a textual representation, as shown below. For more details on the language grammar, see Language Syntax.
input = """
agraph (float[N, 128] X, float[128, 10] W, float[10] B) => (float[N, 10] C)
{
    T = MatMul(X, W)
    S = Add(T, B)
    C = Softmax(S)
}
"""
graph = onnx.parser.parse_graph(input)
input = """
<
    ir_version: 7,
    opset_import: ["" : 10]
>
agraph (float[N, 128] X, float[128, 10] W, float[10] B) => (float[N, 10] C)
{
    T = MatMul(X, W)
    S = Add(T, B)
    C = Softmax(S)
}
"""
model = onnx.parser.parse_model(input)
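As a follow-up sketch, the parsed GraphProto can be wrapped into a ModelProto with the helper functions and both results validated; the opset version 10 below simply mirrors the textual model above.
import onnx
import onnx.helper
# Wrap the parsed graph in a model so it can be checked and saved
model_from_graph = onnx.helper.make_model(
    graph, opset_imports=[onnx.helper.make_opsetid("", 10)]
)
onnx.checker.check_model(model_from_graph)
onnx.checker.check_model(model)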
ONNX Inliner¶
The functions onnx.inliner.inline_local_functions and inline_selected_functions can be used to inline model-local functions in an ONNX model. In particular, inline_local_functions can be used to produce a function-free model, suitable for backends that do not handle or support functions, while inline_selected_functions inlines only the selected functions. Inlining ONNX standard ops that are functions (also known as schema-defined functions) is not yet supported.
import onnx
import onnx.inliner
model = onnx.load("path/to/the/model.onnx")
inlined = onnx.inliner.inline_local_functions(model)
onnx.save("path/to/the/inlinedmodel.onnx")