// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include "onnxruntime_cxx_api.h"  // NOLINT
#include "ultra_infer/runtime/backends/backend.h"

namespace ultra_infer {

// Convert FDDataType to OrtDataType
ONNXTensorElementDataType GetOrtDtype(const FDDataType &fd_dtype);

// Convert OrtDataType to FDDataType
FDDataType GetFdDtype(const ONNXTensorElementDataType &ort_dtype);

// Create an Ort::Value from an FDTensor.
// is_backend_cuda specifies whether ONNX Runtime uses the
// CUDAExecutionProvider. When is_backend_cuda = true and
// tensor.device = Device::GPU, the CUDA buffer held by the tensor is
// shared directly with the created Ort::Value (no host copy).
Ort::Value CreateOrtValue(FDTensor &tensor, bool is_backend_cuda = false);

} // namespace ultra_infer
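
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the upstream header): the
// commented snippet below shows one plausible way these helpers compose when
// binding an input tensor for inference. The FDTensor setup (Resize and the
// dtype round-trip check) is an assumption based on common FDTensor-style
// APIs; consult backend.h for the actual interface.
//
//   using namespace ultra_infer;
//
//   FDTensor input;
//   input.Resize({1, 3, 224, 224}, FDDataType::FP32);  // hypothetical setup
//   // ... fill the tensor's buffer with preprocessed data ...
//
//   // Dtype mapping is expected to round-trip between the two enums:
//   // GetFdDtype(GetOrtDtype(FDDataType::FP32)) == FDDataType::FP32
//
//   // With a CUDA backend and a GPU-resident tensor, the device buffer is
//   // shared with ONNX Runtime rather than copied back to the host:
//   Ort::Value ort_input = CreateOrtValue(input, /*is_backend_cuda=*/true);
// ---------------------------------------------------------------------------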