-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path2onnyx.py
66 lines (45 loc) · 1.79 KB
/
2onnyx.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import torch
from torch.nn import DataParallel, Sequential
import argparse
import h5py
import torch.onnx
import torchvision
#import uff
import numpy as np
import models.models as md
def weldon2resnet(name):
    """Load a pretrained WELDON model and repackage its backbone as a
    plain ``Sequential`` ResNet-style feature extractor.

    A 7x7 average pool (stride 1) is appended after the convolutional
    stages, mirroring the classic ResNet pooling layer.
    """
    weldon = md.ResNet_weldon(weldon_pretrained_path=name)
    backbone = weldon.base_layer
    # Append the global average pool under key '8', after the conv stages.
    backbone.add_module('8', torch.nn.AvgPool2d((7, 7), (1, 1)))
    # Re-wrap the children into a fresh Sequential container.
    return Sequential(*list(backbone.children()))
def toonnx(model, saveName):
dummy_input = torch.randn(20, 3, 224, 224)
print('Output size:', model(dummy_input).shape)
output_names = [ "output"]
torch.onnx.export(model, dummy_input, saveName, verbose=True, output_names=output_names)
def text2onnx(model, saveName):
dummy_input = torch.randn(5,10,620)
print('Output text:', model(dummy_input).shape)
output_names = [ "output"]
torch.onnx.export(model, dummy_input, saveName, verbose=True, output_names=output_names)
def loadFullNet(modelPath, map_location=None):
    """Load a full joint-embedding checkpoint and split it into branches.

    Args:
        modelPath: path to a checkpoint containing a ``'state_dict'`` entry.
        map_location: forwarded to ``torch.load``; pass ``'cpu'`` to load a
            GPU-saved checkpoint on a CPU-only machine (the original code
            would crash there). ``None`` keeps the original behavior.

    Returns:
        (imageEmbed, rnn): the image branch unwrapped from its
        ``DataParallel`` module and re-wrapped as ``Sequential``, and the
        caption-embedding branch as stored in the model.
    """
    je = md.joint_embedding()
    # map_location lets the checkpoint be remapped to an available device.
    checkpoint = torch.load(modelPath, map_location=map_location)
    je.load_state_dict(checkpoint['state_dict'])
    # .module strips the DataParallel wrapper saved around the image branch.
    imageEmbed = Sequential(*list(je.img_emb.module.children()))
    rnn = je.cap_emb
    return imageEmbed, rnn
if __name__ == '__main__':
    # CLI entry point: convert a pretrained checkpoint to an ONNX file.
    cli = argparse.ArgumentParser()
    cli.add_argument("-m", "--model_name", default='data/pretrained_classif_152_2400.pth.tar')
    cli.add_argument("-s", "--save_name", default='resnet.onnx')
    cli.add_argument("-t", "--model_type", help="Should be weldon or full", default="weldon")
    opts = cli.parse_args()

    if opts.model_type == "weldon":
        net = weldon2resnet(opts.model_name)
    else:
        # Full joint-embedding checkpoint: only the image branch is exported;
        # the text-branch export is kept below but disabled.
        net, text_net = loadFullNet(opts.model_name)
        #text2onnx(text_net, "textEmbed.onnx")
    toonnx(net, opts.save_name)