Bootstrap

TensorFlow 模型转换为 ONNX 模型

1.查看pb文件的输入输出

一般 CV 相关的模型只有一个输入,即图片,但输出却可能有多个,可以查看训练代码等方式确定输出节点。

import tensorflow.compat.v1 as tf

# Path to the frozen TensorFlow graph (.pb) file.
PATH_TO_CKPT = "/xxx/yyyy/resnet.pb"


def create_graph():
    """Load the frozen graph at PATH_TO_CKPT into the default TF graph."""
    # FIX: tf.gfile.FastGFile is deprecated; tf.io.gfile.GFile is the
    # supported replacement and behaves identically for binary reads.
    with tf.io.gfile.GFile(PATH_TO_CKPT, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        # name='' imports the nodes without a prefix, keeping original names.
        tf.import_graph_def(graph_def, name='')


create_graph()
# Print every node name so the graph's input/output tensors can be identified.
# CV models usually take a single image input but may produce several outputs.
node_names = [node.name for node in tf.get_default_graph().as_graph_def().node]
for node_name in node_names:
    print(node_name, '\n')

2.使用tf2onnx转换

import tf2onnx

# Source frozen graph and destination ONNX file.
PATH_TO_CKPT = "/xxx/yyyy/resnet.pb"
output_path = "/xxx/yyyy/resnet.onnx"

# The input/output names must match the tensors in the .pb graph, with the
# ":0" port suffix appended to each node name (e.g. "input_1" -> "input_1:0").
INPUT_NAMES = ["input_1:0"]
OUTPUT_NAMES = ["predictions/Softmax:0"]

graph_def, _, _ = tf2onnx.tf_loader.from_graphdef(
    PATH_TO_CKPT, INPUT_NAMES, OUTPUT_NAMES)

# Tensors may optionally be renamed to simpler names during conversion, e.g.
# tensors_to_rename = {"pred:0": "pred", "X:0": "X"}
model_proto, _ = tf2onnx.convert.from_graph_def(
    graph_def,
    input_names=INPUT_NAMES,
    output_names=OUTPUT_NAMES,
    opset=13,
    output_path=output_path,
)

# Sanity check: print the output names recorded in the converted ONNX model.
output_names = [n.name for n in model_proto.graph.output]
print(output_names)

3.使用onnx预测

# Load the image; preprocessing must match what the TensorFlow model expects.
import imutils
import cv2
import onnxruntime
import numpy as np

# FIX: img_url was never defined in the original snippet (NameError at the
# imutils call below). Replace the placeholder with a real image URL.
img_url = "https://xxx/yyyy/test.jpg"

image = imutils.url_to_image(img_url)
# OpenCV decodes images as BGR; convert to RGB to match TF preprocessing.
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (224, 224))
# Add the batch dimension: (224, 224, 3) -> (1, 224, 224, 3).
image_np_expanded = np.expand_dims(image, axis=0)

# Load the ONNX model (output_path was set in the conversion step).
# For GPU inference use providers=["CUDAExecutionProvider"].
model = onnxruntime.InferenceSession(output_path, providers=["CPUExecutionProvider"])

# The input/output names must match the ones used during conversion.
result = model.run(["predictions/Softmax:0"],
                   {"input_1:0": image_np_expanded.astype(np.float32)})
print(result)

# Compare against the TensorFlow prediction `preds`:
# np.testing.assert_allclose(preds, result[0], rtol=1e-5)
;