to_onnx.py
import torch
import torch.onnx

from Net import Net


def Convert_ONNX(model, onnxFile):
    # Set the model to inference mode
    model.eval()

    # Create a dummy input tensor with the expected input shape (N, C, H, W)
    dummy_input = torch.randn(1, 3, 60, 160, requires_grad=True)

    # Export the model
    torch.onnx.export(model,                     # model being run
                      dummy_input,               # model input (or a tuple for multiple inputs)
                      onnxFile,                  # where to save the model
                      export_params=True,        # store the trained parameter weights inside the model file
                      opset_version=10,          # the ONNX opset version to export the model to
                      do_constant_folding=True,  # execute constant folding for optimization
                      input_names=['modelInput'],    # the model's input names
                      output_names=['modelOutput'],  # the model's output names
                      dynamic_axes={'modelInput': {0: 'batch_size'},      # variable-length axes
                                    'modelOutput': {0: 'batch_size'}})
    print('Model has been converted to ONNX')


if __name__ == "__main__":
    model = Net()
    path = "model.pth"
    onnxFile = "mathcode.onnx"
    checkpoint = torch.load(path, map_location=torch.device('cpu'))
    model.load_state_dict(checkpoint['model_state_dict'])
    Convert_ONNX(model, onnxFile)
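After the conversion, it can be worth checking the exported file independently of PyTorch. The sketch below is not part of the original script; it assumes the `onnx` and `onnxruntime` packages are installed and reuses the output file name "mathcode.onnx", the input name 'modelInput', and the input shape (N, 3, 60, 160) from the export call above.

import numpy as np
import onnx
import onnxruntime as ort

# Structural validation of the exported graph
onnx_model = onnx.load("mathcode.onnx")
onnx.checker.check_model(onnx_model)

# Run a forward pass with a batch of 2 to exercise the dynamic batch axis
session = ort.InferenceSession("mathcode.onnx")
dummy = np.random.randn(2, 3, 60, 160).astype(np.float32)
outputs = session.run(None, {"modelInput": dummy})
print(outputs[0].shape)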