1. 系统环境
硬件环境(Ascend/GPU/CPU): CPU/GPU/Ascend
MindSpore版本: mindspore=2.3.1
执行模式(PyNative/ Graph):PyNative/ Graph
Python版本: Python=3.9
操作系统平台: linux
2. 报错信息
使用mindsporelite推理报错RuntimeError: data size not equal! Numpy size: 9216, Tensor size: 0
1.生成一个动态shape的onnx
import torch
from torch import Tensor
import math
import numpy as np


class Model(torch.nn.Module):
    """Minimal module wrapping a single batched matrix multiply (torch.bmm)."""

    def __init__(self):
        super().__init__()

    def forward(self, a, b):
        # (B, m, n) @ (B, n, m) -> (B, m, m)
        return torch.bmm(a, b)


# Fixed batch of 3; the middle/last dims become dynamic in the exported ONNX.
lhs = Tensor(np.ones([3, 16, 48], dtype=np.float32))
rhs = Tensor(np.ones([3, 48, 16], dtype=np.float32))
model = Model()
print(model(lhs, rhs))
torch.onnx.export(
    model,
    (lhs, rhs),
    'bmm_dynamic.onnx',
    input_names=['a', 'b'],            # the model's input names
    output_names=['output'],           # the model's output names
    dynamic_axes={
        'a': {1: 'm', 2: 'n'},         # variable length axes
        'b': {1: 'n', 2: 'm'},
        'output': {1: 'm', 2: 'm'},
    },
)
确认生成后的onnx文件
2.将onnx文件通过mindspore lite云侧转换工具转换成MindIR模型
converter_lite --fmk=ONNX --saveType=MINDIR --optimize=none --modelFile=bmm_dynamic.onnx --outputFile=bmm_dynamic
3.通过mindspore lite云侧python接口进行推理
# Failing reproduction: the converted model has dynamic input shapes
# ([3, -1, -1], see the printed shapes below in this report), so the input
# tensors have no allocated size yet when data is assigned.
import numpy as np
import mindspore_lite as mslite
# init context, and set target is cpu
context = mslite.Context()
context.target = ["cpu"]
context.cpu.thread_num = 1
context.cpu.thread_affinity_mode=2
MODEL_PATH = "./bmm_dynamic.mindir"
model = mslite.Model()
model.build_from_file(MODEL_PATH, mslite.ModelType.MINDIR, context)
# set model input
inputs = model.get_inputs()
a_data = np.ones([3, 16, 48], dtype=np.float32)
# RuntimeError raised here: the tensor's size is still 0 because its shape
# contains -1 and no resize() was performed before setting the data.
inputs[0].set_data_from_numpy(a_data)
b_data = np.ones([3, 48, 16], dtype=np.float32)
inputs[1].set_data_from_numpy(b_data)
# execute inference
outputs = model.predict(inputs)
data = outputs[0].get_data_to_numpy()
print(data)
执行结果
Traceback (most recent call last):
File "/mnt/onnx/infer.py", line 18, in <module>
inputs[0].set_data_from_numpy(a_data)
File "/mnt/lib/python3.9/site-packages/mindspore_lite/tensor.py", line 603, in set_data_from_numpy
raise RuntimeError(
RuntimeError: data size not equal! Numpy size: 9216, Tensor size: 0
3. 解决方案
当输入是动态shape时(模型输入的shape的维度包含-1),必须通过 resize 把-1换成固定维度。
在inputs = model.get_inputs() 后面加上打印
print(inputs[0].shape)
print(inputs[1].shape)
得到的结果是
[3, -1, -1]
[3, -1, -1]
需要增加model.resize(inputs, [[3, 16, 48],[3, 48, 16]])
修改后代码如下
# Fixed version: resize the dynamic (-1) input dimensions to concrete values
# before copying data into the input tensors.
import numpy as np
import mindspore_lite as mslite
# init context, and set target is cpu
context = mslite.Context()
context.target = ["cpu"]
context.cpu.thread_num = 1
context.cpu.thread_affinity_mode=2
MODEL_PATH = "./bmm_dynamic.mindir"
model = mslite.Model()
model.build_from_file(MODEL_PATH, mslite.ModelType.MINDIR, context)
# set model input
inputs = model.get_inputs()
# Both inputs print as [3, -1, -1] before the resize.
print(inputs[0].shape)
print(inputs[1].shape)
# Replace the -1 dimensions with fixed shapes so the tensors get a real size.
model.resize(inputs, [[3, 16, 48],[3, 48, 16]])
a_data = np.ones([3, 16, 48], dtype=np.float32)
inputs[0].set_data_from_numpy(a_data)
b_data = np.ones([3, 48, 16], dtype=np.float32)
inputs[1].set_data_from_numpy(b_data)
# execute inference
outputs = model.predict(inputs)
data = outputs[0].get_data_to_numpy()
print(data)
