
Merge pull request #349 from syyxsxx/develop

fix openvino and raspberry bug
Jason, 5 years ago
parent commit 8c04078161

+ 1 - 1
deploy/cpp/CMakeLists.txt

@@ -228,7 +228,7 @@ endif()
 if (NOT WIN32)
     set(DEPS ${DEPS}
         ${MATH_LIB} ${MKLDNN_LIB}
-        glog gflags protobuf z xxhash yaml-cpp
+        glog gflags protobuf xxhash yaml-cpp
         )
     if(EXISTS "${PADDLE_DIR}/third_party/install/snappystream/lib")
         set(DEPS ${DEPS} snappystream)

+ 2 - 10
deploy/openvino/CMakeLists.txt

@@ -8,7 +8,6 @@ SET(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake" ${CMAKE_MODULE_PATH})
 SET(OPENVINO_DIR "" CACHE PATH "Location of libraries")
 SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
 SET(GFLAGS_DIR "" CACHE PATH "Location of libraries")
-SET(GLOG_DIR "" CACHE PATH "Location of libraries")
 SET(NGRAPH_LIB "" CACHE PATH "Location of libraries")
 SET(ARCH "" CACHE PATH "Location of libraries")

@@ -47,9 +46,6 @@ if (NOT DEFINED GFLAGS_DIR OR ${GFLAGS_DIR} STREQUAL "")
     message(FATAL_ERROR "please set GFLAGS_DIR with -DGFLAGS_DIR=/path/gflags")
 endif()

-if (NOT DEFINED GLOG_DIR OR ${GLOG_DIR} STREQUAL "")
-    message(FATAL_ERROR "please set GLOG_DIR with -DLOG_DIR=/path/glog")
-endif()

 if (NOT DEFINED NGRAPH_LIB OR ${NGRAPH_LIB} STREQUAL "")
     message(FATAL_ERROR "please set NGRAPH_DIR with -DNGRAPH_DIR=/path/ngraph")
@@ -70,9 +66,6 @@ endif()
 link_directories("${GFLAGS_DIR}/lib")
 include_directories("${GFLAGS_DIR}/include")

-link_directories("${GLOG_DIR}/lib")
-include_directories("${GLOG_DIR}/include")
-
 link_directories("${NGRAPH_LIB}")
 link_directories("${NGRAPH_LIB}/lib")

@@ -86,7 +79,6 @@ endif ()
 include_directories(${OpenCV_INCLUDE_DIRS})

 if (WIN32)
-    add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
     set(CMAKE_C_FLAGS_DEBUG   "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd")
     set(CMAKE_C_FLAGS_RELEASE  "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT")
     set(CMAKE_CXX_FLAGS_DEBUG  "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd")
@@ -126,11 +118,11 @@ endif(WIN32)

 if (NOT WIN32)
     set(DEPS ${DEPS}
-        glog gflags  z  yaml-cpp
+        gflags yaml-cpp
         )
 else()
     set(DEPS ${DEPS}
-        glog gflags_static  libyaml-cppmt)
+        gflags_static  libyaml-cppmt)
     set(DEPS ${DEPS} libcmt shlwapi)
 endif(NOT WIN32)


+ 1 - 6
deploy/openvino/CMakeSettings.json

@@ -35,13 +35,8 @@
           "name": "WITH_STATIC_LIB",
           "value": "True",
           "type": "BOOL"
-        },
-        {
-          "name": "GLOG_DIR",
-          "value": "/path/to/glog",
-          "type": "PATH"
         }
       ]
     }
   ]
-}
+}

+ 1 - 1
deploy/openvino/demo/classifier.cpp

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include <glog/logging.h>
+#include <gflags/gflags.h>

 #include <fstream>
 #include <iostream>

+ 1 - 2
deploy/openvino/demo/detector.cpp

@@ -12,8 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include <glog/logging.h>
-#include <omp.h>
+#include <gflags/gflags.h>

 #include <algorithm>
 #include <chrono>  // NOLINT

+ 1 - 1
deploy/openvino/demo/segmenter.cpp

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include <glog/logging.h>
+#include <gflags/gflags.h>

 #include <algorithm>
 #include <fstream>

+ 1 - 1
deploy/openvino/python/converter.py

@@ -68,7 +68,7 @@ def export_openvino_model(model, args):
     onnx_parser.add_argument("--model_dir", type=_text_type)
     onnx_parser.add_argument("--save_dir", type=_text_type)
     onnx_parser.add_argument("--fixed_input_shape")
-    onnx_input = os.path.join(args.save_dir, 'x2paddle_model.onnx')
+    onnx_input = os.path.join(args.save_dir, 'paddle2onnx_model.onnx')
     onnx_parser.set_defaults(input_model=onnx_input)
     onnx_parser.set_defaults(output_dir=args.save_dir)
     shape_list = args.fixed_input_shape[1:-1].split(',')

+ 0 - 3
deploy/openvino/scripts/build.sh

@@ -6,8 +6,6 @@ NGRAPH_LIB=$INTEL_OPENVINO_DIR/deployment_tools/ngraph/lib

 # gflags预编译库的路径
 GFLAGS_DIR=$(pwd)/deps/gflags
-# glog预编译库的路径
-GLOG_DIR=$(pwd)/deps/glog

 # opencv使用自带预编译版本
 OPENCV_DIR=$(pwd)/deps/opencv/
@@ -25,7 +23,6 @@ cd build
 cmake .. \
     -DOPENCV_DIR=${OPENCV_DIR} \
     -DGFLAGS_DIR=${GFLAGS_DIR} \
-    -DGLOG_DIR=${GLOG_DIR} \
     -DOPENVINO_DIR=${OPENVINO_DIR} \
     -DNGRAPH_LIB=${NGRAPH_LIB} \
     -DARCH=${ARCH}

+ 1 - 12
deploy/openvino/scripts/install_third-party.sh

@@ -11,22 +11,11 @@ if [ ! -d "./deps/gflag" ]; then
     cd ..
     cd ..
 fi
-if [ ! -d "./deps/glog" ]; then
-    cd deps
-    git clone https://github.com/google/glog
-    sudo apt-get install autoconf automake libtool
-    cd glog
-    ./autogen.sh
-    ./configure
-    make -j 8
-    cd ..
-    cd ..
-fi

 if [ "$ARCH" = "x86" ]; then
     OPENCV_URL=https://bj.bcebos.com/paddlex/deploy/x86opencv/opencv.tar.bz2
 else
-    OPENCV_URL=https://bj.bcebos.com/paddlex/deploy/armopencv/opencv.tar.bz2
+    OPENCV_URL=https://bj.bcebos.com/paddlex/deploy/armlinux/opencv.tar.bz2
 fi
 if [ ! -d "./deps/opencv" ]; then
     cd deps

+ 3 - 4
deploy/raspberry/CMakeLists.txt

@@ -58,13 +58,12 @@ if (WIN32)
   find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH)
   unset(OpenCV_DIR CACHE)
 else ()
-  find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/cmake NO_DEFAULT_PATH)
+	find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH)
 endif ()

 include_directories(${OpenCV_INCLUDE_DIRS})

 if (WIN32)
-    add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
     set(CMAKE_C_FLAGS_DEBUG   "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd")
     set(CMAKE_C_FLAGS_RELEASE  "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT")
     set(CMAKE_CXX_FLAGS_DEBUG  "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd")
@@ -87,11 +86,11 @@ endif()

 if (NOT WIN32)
     set(DEPS ${DEPS}
-        glog gflags  z  yaml-cpp
+        gflags yaml-cpp
         )
 else()
     set(DEPS ${DEPS}
-        glog gflags_static libprotobuf zlibstatic xxhash libyaml-cppmt)
+        gflags_static libprotobuf zlibstatic xxhash libyaml-cppmt)
     set(DEPS ${DEPS} libcmt shlwapi)
 endif(NOT WIN32)


+ 1 - 1
deploy/raspberry/demo/classifier.cpp

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include <glog/logging.h>
+#include <gflags/gflags.h>

 #include <fstream>
 #include <iostream>

+ 1 - 2
deploy/raspberry/demo/detector.cpp

@@ -12,8 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include <glog/logging.h>
-#include <omp.h>
+#include <gflags/gflags.h>

 #include <algorithm>
 #include <chrono>  // NOLINT

+ 1 - 1
deploy/raspberry/demo/segmenter.cpp

@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include <glog/logging.h>
+#include <gflags/gflags.h>

 #include <algorithm>
 #include <fstream>

+ 1 - 11
deploy/raspberry/python/demo.py

@@ -46,13 +46,6 @@ def arg_parser():
         default=1,
         help="Path to PaddelX model yml file")

-    parser.add_argument(
-        "--input_shape",
-        "-ip",
-        type=str,
-        default=None,
-        help=" image input shape of model [NCHW] like [1,3,224,244] ")
-
     return parser


@@ -62,11 +55,8 @@ def main():
     model_nb = args.model_dir
     model_yaml = args.cfg_file
     thread_num = args.thread_num
-    input_shape = args.input_shape
-    input_shape = input_shape[1:-1].split(",", 3)
-    shape = list(map(int, input_shape))
     #model init
-    predictor = deploy.Predictor(model_nb, model_yaml, thread_num, shape)
+    predictor = deploy.Predictor(model_nb, model_yaml, thread_num)

     #predict
     if (args.img_list != None):

+ 11 - 5
deploy/raspberry/python/deploy.py

@@ -24,11 +24,10 @@ from paddlelite.lite import *


 class Predictor:
-    def __init__(self, model_nb, model_yaml, thread_num, shape):
+    def __init__(self, model_nb, model_yaml, thread_num):
         if not osp.exists(model_nb):
             print("model nb file is not exists in {}".format(model_xml))
         self.model_nb = model_nb
-        self.shape = shape
         config = MobileConfig()
         config.set_model_from_file(model_nb)
         config.set_threads(thread_num)
@@ -100,6 +99,7 @@ class Predictor:
     def raw_predict(self, preprocessed_input):
         self.count_num += 1
         input_tensor = self.predictor.get_input(0)
+        im_shape = preprocessed_input['image'].shape
         input_tensor.resize(self.shape)
         input_tensor.set_float_data(preprocessed_input['image'])
         if self.model_name == "YOLOv3":
@@ -123,18 +123,23 @@ class Predictor:
         res = dict()
         if self.model_type == "classifier":
             im, = self.transforms(image)
+            self.shape = [1] + list(im.shape)
             im = np.expand_dims(im, axis=0).copy()
             im = im.flatten()
             res['image'] = im
         elif self.model_type == "detector":
             if self.model_name == "YOLOv3":
                 im, im_shape = self.transforms(image)
+                self.shape = [1] + list(im.shape)
                 im = np.expand_dims(im, axis=0).copy()
                 im_shape = np.expand_dims(im_shape, axis=0).copy()
+                im = im.flatten()
+                im_shape = im_shape.flatten()
                 res['image'] = im
                 res['im_size'] = im_shape
             if self.model_name.count('RCNN') > 0:
                 im, im_resize_info, im_shape = self.transforms(image)
+                self.shape = [1] + list(im.shape)
                 im = np.expand_dims(im, axis=0).copy()
                 im_resize_info = np.expand_dims(im_resize_info, axis=0).copy()
                 im_shape = np.expand_dims(im_shape, axis=0).copy()
@@ -143,6 +148,7 @@ class Predictor:
                 res['im_shape'] = im_shape
         elif self.model_type == "segmenter":
             im, im_info = self.transforms(image)
+            self.shape = [1] + list(im.shape)
             im = np.expand_dims(im, axis=0).copy()
             #np.savetxt('./input_data.txt',im.flatten())
             res['image'] = im
@@ -167,7 +173,7 @@ class Predictor:
         out_label = out_label_tensor.float_data()
         label_shape = tuple(out_label_tensor.shape())
         label_map = np.array(out_label).astype('uint8')
-        label_map = label_map.reshap(label_shape)
+        label_map = label_map.reshape(label_shape)
         label_map = np.squeeze(label_map)

         out_score_tensor = self.predictor.get_output(1)
@@ -197,11 +203,12 @@ class Predictor:
         out_data = out_tensor.float_data()
         out_shape = tuple(out_tensor.shape())
         out_data = np.array(out_data)
-        outputs = label_data.reshap(out_shape)
+        outputs = out_data.reshape(out_shape)

         result = []
         for out in outputs:
             result.append(out.tolist())
+        #print(result)
         return result

     def predict(self, image, topk=1, threshold=0.5):
@@ -212,5 +219,4 @@ class Predictor:
         elif self.model_type == "detector":
             results = self.detector_postprocess(preprocessed_input)
         elif self.model_type == "segmenter":
-            pass
             results = self.segmenter_postprocess(preprocessed_input)
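
Taken together, the demo.py and deploy.py changes drop the user-supplied `--input_shape` argument: the input shape is now inferred from the preprocessed image inside `preprocess()`. A minimal usage sketch of the revised `Predictor` API follows (not part of this commit; the model and image paths are placeholders):

```python
# Sketch only, assuming the post-change deploy/raspberry/python/deploy.py is
# importable as `deploy`; all paths below are placeholders.
import deploy

predictor = deploy.Predictor(
    model_nb="/path/to/model.nb",     # Paddle-Lite .nb model file
    model_yaml="/path/to/model.yml",  # PaddleX model .yml config
    thread_num=4)                     # note: no shape argument any more

# predict() runs preprocess(), which now records self.shape = [1, C, H, W]
# from the transformed image; raw_predict() then resizes the input tensor to it.
result = predictor.predict("/path/to/test_img.jpeg", topk=1)
print(result)
```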

+ 0 - 2
deploy/raspberry/scripts/build.sh

@@ -3,8 +3,6 @@ LITE_DIR=/path/to/Paddle-Lite/inference/lib

 # gflags预编译库的路径
 GFLAGS_DIR=$(pwd)/deps/gflags
-# glog预编译库的路径
-GLOG_DIR=$(pwd)/deps/glog

 # opencv预编译库的路径, 如果使用自带预编译版本可不修改
 OPENCV_DIR=$(pwd)/deps/opencv

+ 1 - 12
deploy/raspberry/scripts/install_third-party.sh

@@ -11,18 +11,7 @@ if [ ! -d "./deps/gflag" ]; then
     cd ..
     cd ..
 fi
-if [ ! -d "./deps/glog" ]; then
-    cd deps
-    git clone https://github.com/google/glog
-    sudo apt-get install autoconf automake libtool
-    cd glog
-    ./autogen.sh
-    ./configure
-    make -j 4
-    cd ..
-    cd ..
-fi
-OPENCV_URL=https://bj.bcebos.com/paddlex/deploy/armopencv/opencv.tar.bz2
+OPENCV_URL=https://bj.bcebos.com/paddlex/deploy/armlinux/opencv.tar.bz2
 if [ ! -d "./deps/opencv" ]; then
     cd deps
     wget -c ${OPENCV_URL}

+ 5 - 8
deploy/raspberry/src/paddlex.cpp

@@ -118,26 +118,23 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
     std::cerr << "Preprocess failed!" << std::endl;
     return false;
   }
-  int h = inputs_.new_im_size_[0];
-  int w = inputs_.new_im_size_[1];
   if (name == "YOLOv3") {
     std::unique_ptr<paddle::lite_api::Tensor> im_size_tensor(
       std::move(predictor_->GetInput(1)));
-    const std::vector<int64_t> IM_SIZE_SHAPE = {1, 2};
-    im_size_tensor->Resize(IM_SIZE_SHAPE);
+    im_size_tensor->Resize({1,2});
     auto *im_size_data = im_size_tensor->mutable_data<int>();
     memcpy(im_size_data, inputs_.ori_im_size_.data(), 1*2*sizeof(int));
   }
   predictor_->Run();
   auto output_names = predictor_->GetOutputNames();
   auto output_box_tensor = predictor_->GetTensor(output_names[0]);
-  const float *output_box = output_box_tensor->mutable_data<float>();
-  std::vector<int64_t> output_box_shape = output_box_tensor->shape();
-  int size = 1;
+  auto *output_box = output_box_tensor->mutable_data<float>();
+  auto output_box_shape = output_box_tensor->shape();
+  int64_t size = 1;
   for (const auto& i : output_box_shape) {
     size *= i;
   }
-  int num_boxes = size / 6;
+  auto num_boxes = static_cast<int>(size / 6);
   for (int i = 0; i < num_boxes; ++i) {
     Box box;
     box.category_id = static_cast<int>(round(output_box[i * 6]));

+ 0 - 4
docs/deploy/openvino/linux.md

@@ -33,8 +33,6 @@ git clone https://github.com/PaddlePaddle/PaddleX.git

 - gflags:编译请参考 [编译文档](https://gflags.github.io/gflags/#download)  

-- glog:编译请参考[编译文档](https://github.com/google/glog)
-
 - opencv: 编译请参考
 [编译文档](https://docs.opencv.org/master/d7/d9f/tutorial_linux_install.html)

@@ -47,8 +45,6 @@ git clone https://github.com/PaddlePaddle/PaddleX.git
 OPENVINO_DIR=$INTEL_OPENVINO_DIR/inference_engine
 # gflags预编译库的路径
 GFLAGS_DIR=$(pwd)/deps/gflags
-# glog预编译库的路径
-GLOG_DIR=$(pwd)/deps/glog
 # ngraph lib预编译库的路径
 NGRAPH_LIB=$INTEL_OPENVINO_DIR/deployment_tools/ngraph/lib
 # opencv预编译库的路径

+ 2 - 4
docs/deploy/openvino/windows.md

@@ -40,11 +40,10 @@ git clone https://github.com/PaddlePaddle/PaddleX.git

 ### Step2 软件依赖
 提供了依赖软件预编译库:
-- [gflas-glog](https://bj.bcebos.com/paddlex/deploy/windows/third-parts.zip)  
+- [gflas](https://bj.bcebos.com/paddlex/deploy/windows/third-parts.zip)  
 - [opencv](https://bj.bcebos.com/paddleseg/deploy/opencv-3.4.6-vc14_vc15.exe)  
 请下载上面两个连接的预编译库。若需要自行下载请参考:
 - gflags:[下载地址](https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/gflags)
-- glog:[编译文档](https://github.com/google/glog)
 - opencv:[下载地址](https://opencv.org/releases/)  
 下载完opencv后需要配置环境变量,如下流程所示  
     - 我的电脑->属性->高级系统设置->环境变量
@@ -55,7 +54,7 @@ git clone https://github.com/PaddlePaddle/PaddleX.git
 1. 打开Visual Studio 2019 Community,点击`继续但无需代码`
 2. 点击: `文件`->`打开`->`CMake` 选择C++预测代码所在路径(例如`D:\projects\PaddleX\deploy\openvino`),并打开`CMakeList.txt`  
 3. 点击:`项目`->`CMake设置`
-4. 点击`浏览`,分别设置编译选项指定`OpenVINO`、`Gflags`、`GLOG`、`NGRAPH`、`OPENCV`的路径  
+4. 点击`浏览`,分别设置编译选项指定`OpenVINO`、`Gflags`、`NGRAPH`、`OPENCV`的路径  

 |  参数名   | 含义  |
 |  ----  | ----  |
@@ -63,7 +62,6 @@ git clone https://github.com/PaddlePaddle/PaddleX.git
 | OPENVINO_DIR | OpenVINO推理库路径,在OpenVINO安装目录下的deployment/inference_engine目录,若未修改OpenVINO默认安装目录可以不用修改 |
 | NGRAPH_LIB | OpenVINO的ngraph库路径,在OpenVINO安装目录下的deployment/ngraph/lib目录,若未修改OpenVINO默认安装目录可以不用修改 |
 | GFLAGS_DIR | gflags库路径 |
-| GLOG_DIR  | glog库路径 |
 | WITH_STATIC_LIB | 是否静态编译,默认为True |  

 **设置完成后**, 点击`保存并生成CMake缓存以加载变量`。

+ 0 - 4
docs/deploy/raspberry/Raspberry.md

@@ -60,8 +60,6 @@ sudo ./lite/tools/build.sh  --arm_os=armlinux --arm_abi=armv7hf --arm_lang=gcc

 - gflags:编译请参考 [编译文档](https://gflags.github.io/gflags/#download)  

-- glog:编译请参考[编译文档](https://github.com/google/glog)
-
 - opencv: 编译请参考
 [编译文档](https://docs.opencv.org/master/d7/d9f/tutorial_linux_install.html)
 ### Step4: 编译
@@ -71,8 +69,6 @@ sudo ./lite/tools/build.sh  --arm_os=armlinux --arm_abi=armv7hf --arm_lang=gcc
 LITE_DIR=/path/to/Paddle-Lite/inference/lib
 # gflags预编译库的路径
 GFLAGS_DIR=$(pwd)/deps/gflags
-# glog预编译库的路径
-GLOG_DIR=$(pwd)/deps/glog
 # opencv预编译库的路径
 OPENCV_DIR=$(pwd)/deps/opencv/
 ```

+ 5 - 4
docs/deploy/raspberry/python.md

@@ -23,8 +23,9 @@ python -m pip install paddlelite
 | --img  | 要预测的图片文件路径 |
 | --image_list  | 按行存储图片路径的.txt文件 |
 | --cfg_file | PaddleX model 的.yml配置文件 |
-| --thread_num  | 预测的线程数, 默认值为1 |
-| --input_shape  | 模型输入中图片输入的大小[N,C,H.W] |
+| --thread_num  | 预测的线程数, 默认值为1 |  
+
+**注意**:由于Paddle-lite的python api尚不支持int64数据的输入,目前树莓派在python下不支持部署YoloV3,如需要请使用C++代码部署YoloV3模型

 ### 样例
 `样例一`:  
@@ -33,7 +34,7 @@ python -m pip install paddlelite
 ```
 cd /root/projects/python  

-python demo.py --model_dir /path/to/openvino_model --img /path/to/test_img.jpeg --cfg_file /path/to/PadlleX_model.yml --thread_num 4 --input_shape [1,3,224,224]
+python demo.py --model_dir /path/to/openvino_model --img /path/to/test_img.jpeg --cfg_file /path/to/PadlleX_model.yml --thread_num 4 
 ```  

 样例二`:
@@ -50,5 +51,5 @@ python demo.py --model_dir /path/to/openvino_model --img /path/to/test_img.jpeg
 ```
 cd /root/projects/python  

-python demo.py --model_dir /path/to/models/openvino_model --image_list /root/projects/images_list.txt --cfg_file=/path/to/PadlleX_model.yml --thread_num 4 --input_shape [1,3,224,224]
+python demo.py --model_dir /path/to/models/openvino_model --image_list /root/projects/images_list.txt --cfg_file=/path/to/PadlleX_model.yml --thread_num 4 
 ```