
add python deploy

will-jl944 4 years ago
parent commit 5daf6c287d
3 changed files with 64 additions and 17 deletions
  1. paddlex/cv/models/segmenter.py (+21 -6)
  2. paddlex/cv/models/utils/infer_nets.py (+6 -3)
  3. paddlex/deploy.py (+37 -8)

+ 21 - 6
paddlex/cv/models/segmenter.py

@@ -15,6 +15,7 @@
 import math
 import os.path as osp
 import numpy as np
+import cv2
 from collections import OrderedDict
 import paddle
 import paddle.nn.functional as F
@@ -107,7 +108,10 @@ class BaseSegmenter(BaseModel):
                 logit, origin_shape, transforms=inputs[2])
             label_map = paddle.argmax(
                 score_map, axis=1, keepdim=True, dtype='int32')
-            score_map = paddle.nn.functional.softmax(score_map, axis=1)
+            score_map = paddle.transpose(
+                paddle.nn.functional.softmax(
+                    score_map, axis=1),
+                perm=[0, 2, 3, 1])
             score_map = paddle.squeeze(score_map)
             label_map = paddle.squeeze(label_map)
             outputs = {'label_map': label_map, 'score_map': score_map}
@@ -464,7 +468,7 @@ class BaseSegmenter(BaseModel):
             {"label map": `label map`, "score_map": `score map`}.
             If img_file is a list, the result is a list composed of dicts with the corresponding fields:
             label_map(np.ndarray): the predicted label map
-            score_map(np.ndarray): the prediction score map
+            score_map(np.ndarray): the prediction score map (NHWC)
 
         """
         if transforms is None and not hasattr(self, 'test_transforms'):
@@ -568,19 +572,30 @@ class BaseSegmenter(BaseModel):
             batch_origin_shape, transforms)
         results = list()
         for pred, restore_list in zip(batch_pred, batch_restore_list):
-            pred = paddle.unsqueeze(pred, axis=0)
+            if not isinstance(pred, np.ndarray):
+                pred = paddle.unsqueeze(pred, axis=0)
             for item in restore_list[::-1]:
                 # TODO: replace with cv2 interpolation (paddle ops cannot be used at deployment time)
                 h, w = item[1][0], item[1][1]
                 if item[0] == 'resize':
-                    pred = F.interpolate(pred, (h, w), mode='nearest')
+                    if not isinstance(pred, np.ndarray):
+                        pred = F.interpolate(pred, (h, w), mode='nearest')
+                    else:
+                        pred = cv2.resize(
+                            pred, (h, w), interpolation=cv2.INTER_NEAREST)
                 elif item[0] == 'padding':
                     x, y = item[2]
-                    pred = pred[:, :, y:y + h, x:x + w]
+                    if not isinstance(pred, np.ndarray):
+                        pred = pred[:, :, y:y + h, x:x + w]
+                    else:
+                        pred = pred[..., y:y + h, x:x + w]
                 else:
                     pass
             results.append(pred)
-        batch_pred = paddle.concat(results, axis=0)
+        if not isinstance(pred, np.ndarray):
+            batch_pred = paddle.concat(results, axis=0)
+        else:
+            batch_pred = np.stack(results, axis=0)
         return batch_pred
 
 

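Note on the segmenter.py hunks: the first change returns score_map in NHWC layout, and the _postprocess changes let the same restore loop handle either paddle tensors (training / Python prediction) or numpy arrays (deployment). The following is a minimal sketch, not part of the commit, of those two resize paths with made-up shapes, assuming paddle, numpy, and cv2 are available:

    import cv2
    import numpy as np
    import paddle
    import paddle.nn.functional as F

    h, w = 100, 200  # target (height, width) recorded by a 'resize' restore item

    # paddle path: pred is an N x C x H x W tensor, resized with a paddle op
    tensor_pred = paddle.rand([1, 3, 50, 50])
    tensor_pred = F.interpolate(tensor_pred, (h, w), mode='nearest')
    print(tensor_pred.shape)   # [1, 3, 100, 200]

    # numpy path: pred is an H x W x C array; paddle ops are unavailable at
    # deployment time, so cv2 is used (cv2.resize takes dsize as (width, height))
    numpy_pred = np.random.rand(50, 50, 3).astype('float32')
    numpy_pred = cv2.resize(numpy_pred, (w, h), interpolation=cv2.INTER_NEAREST)
    print(numpy_pred.shape)    # (100, 200, 3)
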
+ 6 - 3
paddlex/cv/models/utils/infer_nets.py

@@ -17,14 +17,17 @@ import paddle
 
 class PostProcessor(paddle.nn.Layer):
     def __init__(self, model_type):
+        super(PostProcessor, self).__init__()
         self.model_type = model_type
 
     def forward(self, net_outputs):
         if self.model_type == 'classifier':
             outputs = paddle.nn.functional.softmax(net_outputs, axis=1)
         elif self.model_type == 'segmenter':
-            outputs = paddle.squeeze(paddle.nn.functional.softmax(net_outputs, axis=1)), \
-                      paddle.squeeze(paddle.argmax(net_outputs, axis=1))
+            # score_map, label_map
+            outputs = paddle.transpose(paddle.nn.functional.softmax(net_outputs, axis=1), perm=[0, 2, 3, 1]), \
+                      paddle.transpose(paddle.argmax(net_outputs, axis=1, keepdim=True, dtype='int32'),
+                                       perm=[0, 2, 3, 1])
         else:
             outputs = net_outputs
         return outputs
@@ -37,7 +40,7 @@ class InferNet(paddle.nn.Layer):
         self.postprocessor = PostProcessor(model_type)
 
     def forward(self, x):
-        net_outputs = self.net(x)
+        net_outputs = self.net(x)[0]
         outputs = self.postprocessor(net_outputs)
 
         return outputs

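Note on the infer_nets.py hunk: with super().__init__() added, PostProcessor is now a properly initialized paddle.nn.Layer, and for segmenters it returns an NHWC score map plus an NHWC int32 label map instead of squeezed tensors. A small shape check as a sketch, assuming PaddleX at this commit is importable and using dummy logits:

    import paddle
    from paddlex.cv.models.utils.infer_nets import PostProcessor

    post = PostProcessor('segmenter')
    logits = paddle.rand([1, 3, 16, 16])   # dummy N x C x H x W segmenter logits
    score_map, label_map = post(logits)
    print(score_map.shape)                 # [1, 16, 16, 3]  softmax scores, NHWC
    print(label_map.shape)                 # [1, 16, 16, 1]  int32 argmax labels, NHWC
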
+ 37 - 8
paddlex/deploy.py

@@ -14,7 +14,6 @@
 
 import os.path as osp
 import numpy as np
-import paddle.nn.functional as F
 from paddle.inference import Config
 from paddle.inference import create_predictor
 from paddle.inference import PrecisionType
@@ -146,10 +145,37 @@ class Predictor(object):
                 exit=True)
         return preprocessed_samples
 
-    def postprocess(self, net_outputs, topk=1):
+    def postprocess(self, net_outputs, topk=1, ori_shape=None,
+                    transforms=None):
         if self._model.model_type == 'classifier':
             true_topk = min(self._model.num_classes, topk)
-            preds = self._model._postprocess(net_outputs, true_topk)
+            preds = self._model._postprocess(net_outputs[0], true_topk)
+        elif self._model.model_type == 'segmenter':
+            score_map, label_map = net_outputs
+            combo = np.concatenate([score_map, label_map], axis=-1)
+            combo = self._model._postprocess(
+                combo,
+                batch_origin_shape=ori_shape,
+                transforms=transforms.transforms)
+            score_map = np.squeeze(combo[..., :-1])
+            label_map = np.squeeze(combo[..., -1])
+            if len(score_map.shape) == 3:
+                preds = {'label_map': label_map, 'score_map': score_map}
+            else:
+                preds = [{
+                    'label_map': l,
+                    'score_map': s
+                } for l, s in zip(label_map, score_map)]
+        elif self._model.model_type == 'detector':
+            net_outputs = {
+                k: v
+                for k, v in zip(['bbox', 'bbox_num', 'mask'], net_outputs)
+            }
+            preds = self._model._postprocess(net_outputs)
+        else:
+            logging.error(
+                "Invalid model type {}.".format(self._model.model_type),
+                exit=True)
 
         return preds
 
@@ -167,9 +193,6 @@ class Predictor(object):
         self.timer.inference_time_s.start()
         self.predictor.run()
         output_names = self.predictor.get_output_names()
-        if self._model.model_type == 'classifier':
-            net_outputs = F.softmax(self.predictor.get_output_handle(name))
-
         net_outputs = list()
         for name in output_names:
             output_tensor = self.predictor.get_output_handle(name)
@@ -204,5 +227,11 @@ class Predictor(object):
         self.timer.inference_time_s.end()
 
         self.timer.postprocess_time_s.start()
-        results = self.postprocess(net_outputs, topk)
-        print(results)
+        results = self.postprocess(
+            net_outputs,
+            topk,
+            ori_shape=preprocessed_input.get('ori_shape', None),
+            transforms=transforms)
+        self.timer.postprocess_time_s.end()
+
+        return results
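
Note on the deploy.py hunks: postprocess now covers classifier, segmenter, and detector outputs, and the prediction results are returned instead of printed. A hedged usage sketch for a segmentation model; the model directory and image path are placeholders, and the call pattern assumes the usual paddlex.deploy.Predictor API rather than anything shown in this diff:

    import paddlex as pdx

    # "./inference_model" and "test.jpg" are placeholder paths, not from the commit
    predictor = pdx.deploy.Predictor("./inference_model")
    result = predictor.predict("test.jpg")

    # per the segmenter branch of postprocess above, a single image yields a dict
    print(result['label_map'].shape)   # H x W predicted class ids
    print(result['score_map'].shape)   # H x W x num_classes softmax scores, NHWC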