convertor.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
import paddle.fluid as fluid
import os
import sys
import paddlex as pdx
import paddlex.utils.logging as logging

__all__ = ['export_onnx']


def export_onnx(model_dir, save_dir, fixed_input_shape):
    assert len(fixed_input_shape) == 2, "len of fixed input shape must == 2"
    model = pdx.load_model(model_dir, fixed_input_shape)
    model_name = os.path.basename(model_dir.strip('/')).split('/')[-1]
    export_onnx_model(model, save_dir)
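

# Example usage (a minimal sketch; the paths, the input shape and the
# `paddlex.convertor` import path below are assumptions for illustration):
#
#     from paddlex.convertor import export_onnx
#     export_onnx(
#         model_dir='output/yolov3_mobilenetv1/best_model',
#         save_dir='output/onnx_model',
#         fixed_input_shape=[608, 608])
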

def export_onnx_model(model, save_dir, opset_version=10):
    if model.__class__.__name__ == "FastSCNN" or (
            model.model_type == "detector" and
            model.__class__.__name__ != "YOLOv3"):
        logging.error(
            "Only image classification models, YOLOv3 detection models and semantic segmentation models (except FastSCNN) can be exported to ONNX"
        )
    try:
        import x2paddle
        if x2paddle.__version__ < '0.7.4':
            logging.error("You need to upgrade x2paddle >= 0.7.4")
    except:
        logging.error(
            "You need to install x2paddle first, pip install x2paddle>=0.7.4")
    if opset_version == 10 and model.__class__.__name__ == "YOLOv3":
        logging.error(
            "The model is exported for OpenVINO by default, so the multiclass_nms output in the ONNX model will contain the background class. If you need an ONNX model fully consistent with Paddle, please export it with X2Paddle directly"
        )
        x2paddle.op_mapper.paddle2onnx.opset10.paddle_custom_layer.multiclass_nms.multiclass_nms = multiclass_nms_for_openvino
    from x2paddle.op_mapper.paddle2onnx.paddle_op_mapper import PaddleOpMapper
    mapper = PaddleOpMapper()
    mapper.convert(
        model.test_prog,
        save_dir,
        scope=model.scope,
        opset_version=opset_version)
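

# For YOLOv3 with opset 10, export_onnx_model above swaps x2paddle's default
# multiclass_nms mapper for the OpenVINO-oriented implementation below (see
# its docstring regarding dynamic shapes).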
def multiclass_nms_for_openvino(op, block):
    """
    Convert the paddle multiclass_nms op to ONNX ops.
    This op gets the selected boxes from the original boxes.
    This version is for OpenVINO, which doesn't support dynamic shapes.
    """
    print('openvino')
    import math
    import sys
    import numpy as np
    import paddle.fluid.core as core
    import paddle.fluid as fluid
    import onnx
    import warnings
    from onnx import helper, onnx_pb
    inputs = dict()
    outputs = dict()
    attrs = dict()
    for name in op.input_names:
        inputs[name] = op.input(name)
    for name in op.output_names:
        outputs[name] = op.output(name)
    for name in op.attr_names:
        attrs[name] = op.attr(name)
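    # at this point inputs/outputs map each argument name of the Paddle op to
    # a list of variable names, and attrs holds the op attributes
    # (score_threshold, nms_threshold, keep_top_k, background_label, ...)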
    result_name = outputs['Out'][0]
    background = attrs['background_label']
    normalized = attrs['normalized']
    if not normalized:
        warnings.warn(
            'The parameter normalized of the multiclass_nms OP of Paddle is '
            'False, which differs from ONNX. Please set normalized=True in '
            'multiclass_nms of Paddle')
    # convert the paddle attributes to onnx tensors
    name_score_threshold = [outputs['Out'][0] + "@score_threshold"]
    name_iou_threshold = [outputs['Out'][0] + "@iou_threshold"]
    name_keep_top_k = [outputs['Out'][0] + '@keep_top_k']
    name_keep_top_k_2D = [outputs['Out'][0] + '@keep_top_k_1D']
    node_score_threshold = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=name_score_threshold,
        value=onnx.helper.make_tensor(
            name=name_score_threshold[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=(),
            vals=[float(attrs['score_threshold'])]))
    node_iou_threshold = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=name_iou_threshold,
        value=onnx.helper.make_tensor(
            name=name_iou_threshold[0] + "@const",
            data_type=onnx.TensorProto.FLOAT,
            dims=(),
            vals=[float(attrs['nms_threshold'])]))
    node_keep_top_k = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=name_keep_top_k,
        value=onnx.helper.make_tensor(
            name=name_keep_top_k[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=(),
            vals=[np.int64(attrs['keep_top_k'])]))
    node_keep_top_k_2D = onnx.helper.make_node(
        'Constant',
        inputs=[],
        outputs=name_keep_top_k_2D,
        value=onnx.helper.make_tensor(
            name=name_keep_top_k_2D[0] + "@const",
            data_type=onnx.TensorProto.INT64,
            dims=[1, 1],
            vals=[np.int64(attrs['keep_top_k'])]))
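    # keep_top_k is materialized twice on purpose: the scalar version feeds
    # NonMaxSuppression as max_output_boxes_per_class, while the [1, 1]
    # version is concatenated with the number of selected boxes further down
    # to compute min(keep_top_k, num_selected) for TopK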
    # the paddle data format is x1,y1,x2,y2
    kwargs = {'center_point_box': 0}
    name_select_nms = [outputs['Out'][0] + "@select_index"]
    node_select_nms = onnx.helper.make_node(
        'NonMaxSuppression',
        inputs=inputs['BBoxes'] + inputs['Scores'] + name_keep_top_k +
        name_iou_threshold + name_score_threshold,
        outputs=name_select_nms)
    # step 1 nodes select the nms class
    node_list = [
        node_score_threshold, node_iou_threshold, node_keep_top_k,
        node_keep_top_k_2D, node_select_nms
    ]
    # create some const values to use
    name_const_value = [
        result_name + "@const_0", result_name + "@const_1",
        result_name + "@const_2", result_name + "@const_-1"
    ]
    value_const_value = [0, 1, 2, -1]
    for name, value in zip(name_const_value, value_const_value):
        node = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=[name],
            value=onnx.helper.make_tensor(
                name=name + "@const",
                data_type=onnx.TensorProto.INT64,
                dims=[1],
                vals=[value]))
        node_list.append(node)
    # In this code block we decode the raw score data: the N * C * M scores
    # are flattened, and the selected indices are decoded so that each
    # selected (class, box) score can be gathered from the flat tensor
    outputs_gather_1_ = [result_name + "@gather_1_"]
    node_gather_1_ = onnx.helper.make_node(
        'Gather',
        inputs=name_select_nms + [result_name + "@const_1"],
        outputs=outputs_gather_1_,
        axis=1)
    node_list.append(node_gather_1_)
    outputs_gather_1 = [result_name + "@gather_1"]
    node_gather_1 = onnx.helper.make_node(
        'Unsqueeze',
        inputs=outputs_gather_1_,
        outputs=outputs_gather_1,
        axes=[0])
    node_list.append(node_gather_1)
    outputs_gather_2_ = [result_name + "@gather_2_"]
    node_gather_2_ = onnx.helper.make_node(
        'Gather',
        inputs=name_select_nms + [result_name + "@const_2"],
        outputs=outputs_gather_2_,
        axis=1)
    node_list.append(node_gather_2_)
    outputs_gather_2 = [result_name + "@gather_2"]
    node_gather_2 = onnx.helper.make_node(
        'Unsqueeze',
        inputs=outputs_gather_2_,
        outputs=outputs_gather_2,
        axes=[0])
    node_list.append(node_gather_2)
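    # NonMaxSuppression returns [num_selected, 3] triplets of
    # (batch_index, class_index, box_index); gather_1 picks out the class
    # indices (column 1) and gather_2 the box indices (column 2), each
    # unsqueezed back to shape [1, num_selected, 1]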
    # reshape the N * C * M scores to a flat tensor
    outputs_reshape_scores_rank1 = [result_name + "@reshape_scores_rank1"]
    node_reshape_scores_rank1 = onnx.helper.make_node(
        "Reshape",
        inputs=inputs['Scores'] + [result_name + "@const_-1"],
        outputs=outputs_reshape_scores_rank1)
    node_list.append(node_reshape_scores_rank1)
    # get the shape of scores
    outputs_shape_scores = [result_name + "@shape_scores"]
    node_shape_scores = onnx.helper.make_node(
        'Shape', inputs=inputs['Scores'], outputs=outputs_shape_scores)
    node_list.append(node_shape_scores)
    # gather dim 2 of the scores shape, i.e. the number of boxes M
    outputs_gather_scores_dim1 = [result_name + "@gather_scores_dim1"]
    node_gather_scores_dim1 = onnx.helper.make_node(
        'Gather',
        inputs=outputs_shape_scores + [result_name + "@const_2"],
        outputs=outputs_gather_scores_dim1,
        axis=0)
    node_list.append(node_gather_scores_dim1)
    # mul: class_id * M
    outputs_mul_classnum_boxnum = [result_name + "@mul_classnum_boxnum"]
    node_mul_classnum_boxnum = onnx.helper.make_node(
        'Mul',
        inputs=outputs_gather_1 + outputs_gather_scores_dim1,
        outputs=outputs_mul_classnum_boxnum)
    node_list.append(node_mul_classnum_boxnum)
    # add: class_id * M + box_id
    outputs_add_class_M_index = [result_name + "@add_class_M_index"]
    node_add_class_M_index = onnx.helper.make_node(
        'Add',
        inputs=outputs_mul_classnum_boxnum + outputs_gather_2,
        outputs=outputs_add_class_M_index)
    node_list.append(node_add_class_M_index)
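    # the flattened score index is class_id * M + box_id (M = number of
    # boxes), which addresses the score of each selected (class, box) pair in
    # the flattened scores tensor (the indexing assumes one image per batch)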
    # Squeeze the indices to 1 dim
    outputs_squeeze_select_index = [result_name + "@squeeze_select_index"]
    node_squeeze_select_index = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_add_class_M_index,
        outputs=outputs_squeeze_select_index,
        axes=[0, 2])
    node_list.append(node_squeeze_select_index)
    # gather the data from the flattened scores
    outputs_gather_select_scores = [result_name + "@gather_select_scores"]
    node_gather_select_scores = onnx.helper.make_node(
        'Gather',
        inputs=outputs_reshape_scores_rank1 + outputs_squeeze_select_index,
        outputs=outputs_gather_select_scores,
        axis=0)
    node_list.append(node_gather_select_scores)
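    # gather_select_scores is now a 1-D tensor with one score per selected
    # (class, box) pair; the next block derives K = min(keep_top_k,
    # num_selected) for TopK from its length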
    # get the number of selected boxes, to feed TopK
    outputs_shape_select_num = [result_name + "@shape_select_num"]
    node_shape_select_num = onnx.helper.make_node(
        'Shape',
        inputs=outputs_gather_select_scores,
        outputs=outputs_shape_select_num)
    node_list.append(node_shape_select_num)
    outputs_gather_select_num = [result_name + "@gather_select_num"]
    node_gather_select_num = onnx.helper.make_node(
        'Gather',
        inputs=outputs_shape_select_num + [result_name + "@const_0"],
        outputs=outputs_gather_select_num,
        axis=0)
    node_list.append(node_gather_select_num)
    outputs_unsqueeze_select_num = [result_name + "@unsqueeze_select_num"]
    node_unsqueeze_select_num = onnx.helper.make_node(
        'Unsqueeze',
        inputs=outputs_gather_select_num,
        outputs=outputs_unsqueeze_select_num,
        axes=[0])
    node_list.append(node_unsqueeze_select_num)
    outputs_concat_topK_select_num = [result_name + "@conat_topK_select_num"]
    node_conat_topK_select_num = onnx.helper.make_node(
        'Concat',
        inputs=outputs_unsqueeze_select_num + name_keep_top_k_2D,
        outputs=outputs_concat_topK_select_num,
        axis=0)
    node_list.append(node_conat_topK_select_num)
    outputs_cast_concat_topK_select_num = [
        result_name + "@concat_topK_select_num"
    ]
    node_outputs_cast_concat_topK_select_num = onnx.helper.make_node(
        'Cast',
        inputs=outputs_concat_topK_select_num,
        outputs=outputs_cast_concat_topK_select_num,
        to=6)
    node_list.append(node_outputs_cast_concat_topK_select_num)
    # get min(topK, num_select)
    outputs_compare_topk_num_select = [
        result_name + "@compare_topk_num_select"
    ]
    node_compare_topk_num_select = onnx.helper.make_node(
        'ReduceMin',
        inputs=outputs_cast_concat_topK_select_num,
        outputs=outputs_compare_topk_num_select,
        keepdims=0)
    node_list.append(node_compare_topk_num_select)
    # unsqueeze the indices to a 1D tensor
    outputs_unsqueeze_topk_select_indices = [
        result_name + "@unsqueeze_topk_select_indices"
    ]
    node_unsqueeze_topk_select_indices = onnx.helper.make_node(
        'Unsqueeze',
        inputs=outputs_compare_topk_num_select,
        outputs=outputs_unsqueeze_topk_select_indices,
        axes=[0])
    node_list.append(node_unsqueeze_topk_select_indices)
    # cast the indices to INT64
    outputs_cast_topk_indices = [result_name + "@cast_topk_indices"]
    node_cast_topk_indices = onnx.helper.make_node(
        'Cast',
        inputs=outputs_unsqueeze_topk_select_indices,
        outputs=outputs_cast_topk_indices,
        to=7)
    node_list.append(node_cast_topk_indices)
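    # the concatenated pair is cast to INT32 (to=6) before ReduceMin, and the
    # resulting K is cast back to INT64 (to=7) because TopK expects its K
    # input as an INT64 tensor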
    # select topk scores indices
    outputs_topk_select_topk_indices = [
        result_name + "@topk_select_topk_values",
        result_name + "@topk_select_topk_indices"
    ]
    node_topk_select_topk_indices = onnx.helper.make_node(
        'TopK',
        inputs=outputs_gather_select_scores + outputs_cast_topk_indices,
        outputs=outputs_topk_select_topk_indices)
    node_list.append(node_topk_select_topk_indices)
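    # TopK keeps the K highest scores; output 0 holds the values and output 1
    # the indices into gather_select_scores, which are reused below to gather
    # the matching labels and box ids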
    # gather topk label, scores, boxes
    outputs_gather_topk_scores = [result_name + "@gather_topk_scores"]
    node_gather_topk_scores = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_select_scores +
        [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_scores,
        axis=0)
    node_list.append(node_gather_topk_scores)
    outputs_gather_topk_class = [result_name + "@gather_topk_class"]
    node_gather_topk_class = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_1 + [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_class,
        axis=1)
    node_list.append(node_gather_topk_class)
    # to gather the boxes, first gather the box ids, then gather the boxes
    outputs_gather_topk_boxes_id = [result_name + "@gather_topk_boxes_id"]
    node_gather_topk_boxes_id = onnx.helper.make_node(
        'Gather',
        inputs=outputs_gather_2 + [outputs_topk_select_topk_indices[1]],
        outputs=outputs_gather_topk_boxes_id,
        axis=1)
    node_list.append(node_gather_topk_boxes_id)
    # squeeze the gather_topk_boxes_id to 1 dim
    outputs_squeeze_topk_boxes_id = [result_name + "@squeeze_topk_boxes_id"]
    node_squeeze_topk_boxes_id = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_gather_topk_boxes_id,
        outputs=outputs_squeeze_topk_boxes_id,
        axes=[0, 2])
    node_list.append(node_squeeze_topk_boxes_id)
    outputs_gather_select_boxes = [result_name + "@gather_select_boxes"]
    node_gather_select_boxes = onnx.helper.make_node(
        'Gather',
        inputs=inputs['BBoxes'] + outputs_squeeze_topk_boxes_id,
        outputs=outputs_gather_select_boxes,
        axis=1)
    node_list.append(node_gather_select_boxes)
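    # the kept boxes are gathered from the original BBoxes input along axis 1
    # using the squeezed box ids, giving an [N, K, 4] tensor of x1, y1, x2, y2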
    # concat the final result
    # before the concat, cast the class to float
    outputs_cast_topk_class = [result_name + "@cast_topk_class"]
    node_cast_topk_class = onnx.helper.make_node(
        'Cast',
        inputs=outputs_gather_topk_class,
        outputs=outputs_cast_topk_class,
        to=1)
    node_list.append(node_cast_topk_class)
    outputs_unsqueeze_topk_scores = [result_name + "@unsqueeze_topk_scores"]
    node_unsqueeze_topk_scores = onnx.helper.make_node(
        'Unsqueeze',
        inputs=outputs_gather_topk_scores,
        outputs=outputs_unsqueeze_topk_scores,
        axes=[0, 2])
    node_list.append(node_unsqueeze_topk_scores)
    inputs_concat_final_results = outputs_cast_topk_class + \
        outputs_unsqueeze_topk_scores + outputs_gather_select_boxes
    outputs_sort_by_socre_results = [result_name + "@concat_topk_scores"]
    node_sort_by_socre_results = onnx.helper.make_node(
        'Concat',
        inputs=inputs_concat_final_results,
        outputs=outputs_sort_by_socre_results,
        axis=2)
    node_list.append(node_sort_by_socre_results)
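    # after the Concat along axis 2 each kept detection is laid out as
    # [label, score, x1, y1, x2, y2], matching Paddle's multiclass_nms output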
    # select topk classes indices
    outputs_squeeze_cast_topk_class = [
        result_name + "@squeeze_cast_topk_class"
    ]
    node_squeeze_cast_topk_class = onnx.helper.make_node(
        'Squeeze',
        inputs=outputs_cast_topk_class,
        outputs=outputs_squeeze_cast_topk_class,
        axes=[0, 2])
    node_list.append(node_squeeze_cast_topk_class)
    outputs_neg_squeeze_cast_topk_class = [
        result_name + "@neg_squeeze_cast_topk_class"
    ]
    node_neg_squeeze_cast_topk_class = onnx.helper.make_node(
        'Neg',
        inputs=outputs_squeeze_cast_topk_class,
        outputs=outputs_neg_squeeze_cast_topk_class)
    node_list.append(node_neg_squeeze_cast_topk_class)
    outputs_topk_select_classes_indices = [
        result_name + "@topk_select_topk_classes_scores",
        result_name + "@topk_select_topk_classes_indices"
    ]
    node_topk_select_topk_indices = onnx.helper.make_node(
        'TopK',
        inputs=outputs_neg_squeeze_cast_topk_class + outputs_cast_topk_indices,
        outputs=outputs_topk_select_classes_indices)
    node_list.append(node_topk_select_topk_indices)
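    # TopK over the negated labels yields indices that order the kept
    # detections by ascending class id; the final Gather below re-orders the
    # concatenated results accordingly and writes them to the op output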
    outputs_concat_final_results = outputs['Out']
    node_concat_final_results = onnx.helper.make_node(
        'Gather',
        inputs=outputs_sort_by_socre_results +
        [outputs_topk_select_classes_indices[1]],
        outputs=outputs_concat_final_results,
        axis=1)
    node_list.append(node_concat_final_results)
    return node_list