- This is a helper class for deep learning frameworks, intended mainly for inference
- This class provides a common interface to various deep learning frameworks, so that you can use the same application code regardless of the framework
- TensorFlow Lite
- TensorFlow Lite with delegate (GPU, XNNPACK, EdgeTPU)
- TensorRT
- OpenCV(dnn)
- ncnn
- MNN
- https://github.com/iwatake2222/InferenceHelper_Sample
- https://github.com/iwatake2222/play_with_tflite
- https://github.com/iwatake2222/play_with_tensorrt
- https://github.com/iwatake2222/play_with_ncnn
- https://github.com/iwatake2222/play_with_mnn
- Windows 10 (Visual Studio 2017 x64)
- Linux (Xubuntu 18.04 x64)
- Linux (Jetson Xavier NX)
- Add this repository into your project (using `git submodule` is recommended)
- This class requires a pre-built deep learning framework library, and the appropriate CMake variables need to be set
- Please see the sample project
- CMake variable `THIRD_PARTY_DIR`: set the directory containing the pre-built deep learning framework libraries
- Add InferenceHelper and CommonHelper to your project

```cmake
set(THIRD_PARTY_DIR ${CMAKE_CURRENT_LIST_DIR}/../../third_party/)
set(INFERENCE_HELPER_DIR ${CMAKE_CURRENT_LIST_DIR}/../../InferenceHelper/)
add_subdirectory(${INFERENCE_HELPER_DIR}/CommonHelper CommonHelper)
target_include_directories(${LibraryName} PUBLIC ${INFERENCE_HELPER_DIR}/CommonHelper)
target_link_libraries(${LibraryName} CommonHelper)
add_subdirectory(${INFERENCE_HELPER_DIR}/InferenceHelper InferenceHelper)
target_include_directories(${LibraryName} PUBLIC ${INFERENCE_HELPER_DIR}/InferenceHelper)
target_link_libraries(${LibraryName} InferenceHelper)
```
- Deep learning framework:
  - You can enable multiple options, although the following examples enable just one option at a time

```sh
# OpenCV (dnn)
cmake .. -DINFERENCE_HELPER_ENABLE_OPENCV=on
# TensorFlow Lite
cmake .. -DINFERENCE_HELPER_ENABLE_TFLITE=on
# TensorFlow Lite (XNNPACK)
cmake .. -DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK=on
# TensorFlow Lite (GPU)
cmake .. -DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU=on
# TensorFlow Lite (EdgeTPU)
cmake .. -DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU=on
# TensorRT
cmake .. -DINFERENCE_HELPER_ENABLE_TENSORRT=on
# ncnn
cmake .. -DINFERENCE_HELPER_ENABLE_NCNN=on
# MNN
cmake .. -DINFERENCE_HELPER_ENABLE_MNN=on
```
- Enable/Disable preprocessing using OpenCV:
  - By disabling this option, InferenceHelper does not depend on OpenCV

```sh
cmake .. -DINFERENCE_HELPER_ENABLE_PRE_PROCESS_BY_OPENCV=off
```
```c++
typedef enum {
    OPEN_CV,
    OPEN_CV_GPU,
    TENSORFLOW_LITE,
    TENSORFLOW_LITE_XNNPACK,
    TENSORFLOW_LITE_GPU,
    TENSORFLOW_LITE_EDGETPU,
    TENSOR_RT,
    NCNN,
    MNN,
} HELPER_TYPE;
```
- Create an InferenceHelper instance for the selected framework

```c++
std::unique_ptr<InferenceHelper> inferenceHelper(InferenceHelper::create(InferenceHelper::TENSORFLOW_LITE));
```
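Creation can fail if the requested framework was not enabled at build time; a minimal sketch of guarding against that, assuming `create()` returns `nullptr` on failure (an assumption, not a documented guarantee):

```c++
std::unique_ptr<InferenceHelper> inferenceHelper(InferenceHelper::create(InferenceHelper::TENSORFLOW_LITE));
if (!inferenceHelper) {
    // Assumption: create() returns nullptr when the selected framework
    // was not compiled in (e.g. the corresponding cmake option was off)
    printf("Failed to create InferenceHelper\n");
    return -1;
}
```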
```c++
static void preProcessByOpenCV(const InputTensorInfo& inputTensorInfo, bool isNCHW, cv::Mat& imgBlob)
```
- Run preprocess (convert an image to a blob (NCHW or NHWC))
- This is just a helper function; you don't have to use it
- Available when `INFERENCE_HELPER_ENABLE_PRE_PROCESS_BY_OPENCV=on`

```c++
InferenceHelper::preProcessByOpenCV(inputTensorInfo, false, imgSrc);
```
- Set the number of threads to be used
- This function needs to be called before `initialize`

```c++
inferenceHelper->setNumThread(4);
```
- Set custom ops
- This function needs to be called before `initialize`

```c++
std::vector<std::pair<const char*, const void*>> customOps;
customOps.push_back(std::pair<const char*, const void*>("Convolution2DTransposeBias", (const void*)mediapipe::tflite_operations::RegisterConvolution2DTransposeBias()));
inferenceHelper->setCustomOps(customOps);
```
```c++
int32_t initialize(const std::string& modelFilename, std::vector<InputTensorInfo>& inputTensorInfoList, std::vector<OutputTensorInfo>& outputTensorInfoList)
```
- Initialize inference helper
- Load model
- Set tensor information
```c++
std::vector<InputTensorInfo> inputTensorList;
InputTensorInfo inputTensorInfo;
inputTensorInfo.name = "input";
inputTensorInfo.tensorType = TensorInfo::TENSOR_TYPE_FP32;
inputTensorInfo.tensorDims.batch = 1;
inputTensorInfo.tensorDims.width = 224;
inputTensorInfo.tensorDims.height = 224;
inputTensorInfo.tensorDims.channel = 3;
inputTensorInfo.data = imgSrc.data;
inputTensorInfo.dataType = InputTensorInfo::DATA_TYPE_IMAGE;
inputTensorInfo.imageInfo.width = imgSrc.cols;
inputTensorInfo.imageInfo.height = imgSrc.rows;
inputTensorInfo.imageInfo.channel = imgSrc.channels();
inputTensorInfo.imageInfo.cropX = 0;
inputTensorInfo.imageInfo.cropY = 0;
inputTensorInfo.imageInfo.cropWidth = imgSrc.cols;
inputTensorInfo.imageInfo.cropHeight = imgSrc.rows;
inputTensorInfo.imageInfo.isBGR = false;
inputTensorInfo.imageInfo.swapColor = false;
inputTensorInfo.normalize.mean[0] = 0.485f;
inputTensorInfo.normalize.mean[1] = 0.456f;
inputTensorInfo.normalize.mean[2] = 0.406f;
inputTensorInfo.normalize.norm[0] = 0.229f;
inputTensorInfo.normalize.norm[1] = 0.224f;
inputTensorInfo.normalize.norm[2] = 0.225f;
inputTensorList.push_back(inputTensorInfo);

std::vector<OutputTensorInfo> outputTensorList;
OutputTensorInfo outputTensorInfo;
outputTensorInfo.name = "MobilenetV2/Predictions/Reshape_1";
outputTensorInfo.tensorType = TensorInfo::TENSOR_TYPE_FP32;
outputTensorList.push_back(outputTensorInfo);

inferenceHelper->initialize("mobilenet_v2_1.0_224.tflite", inputTensorList, outputTensorList);
```
- Finalize inference helper
```c++
inferenceHelper->finalize();
```
- Run preprocess
- Call this function before `invoke`
- Call this function even if the input data is already pre-processed, so that the data is copied to the internal memory
- Note: some frameworks don't support crop or resize, so it's better to resize the image before calling `preProcess`

```c++
inferenceHelper->preProcess(inputTensorList);
```
- Run inference

```c++
inferenceHelper->invoke(outputTensorList);
```
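Putting the calls together, a minimal sketch of the per-frame flow, assuming the `inputTensorList`/`outputTensorList` prepared for `initialize` above are reused and `imgSrc` holds the current frame (error checking omitted):

```c++
// Typical per-frame flow (illustrative sketch)
inputTensorList[0].data = imgSrc.data;                        // point the input tensor at the new frame
inferenceHelper->preProcess(inputTensorList);                 // convert/copy the input data
inferenceHelper->invoke(outputTensorList);                    // run inference
const float* scores = outputTensorList[0].getDataAsFloat();   // read the first output as FP32
```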
```c++
enum {
    TENSOR_TYPE_NONE,
    TENSOR_TYPE_UINT8,
    TENSOR_TYPE_FP32,
    TENSOR_TYPE_INT32,
    TENSOR_TYPE_INT64,
};

std::string name;       // [In] Set the name of the tensor
int32_t id;             // [Out] Do not modify (used internally by InferenceHelper)
int32_t tensorType;     // [In] The type of the tensor (e.g. TENSOR_TYPE_FP32)
struct {
    int32_t batch;      // 0
    int32_t width;      // 1
    int32_t height;     // 2
    int32_t channel;    // 3
} tensorDims;           // InputTensorInfo:  [In] The dimensions of the tensor (if -1 is set at initialize, the size is read from the model information)
                        // OutputTensorInfo: [Out] The dimensions of the tensor are set from the model information
```
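If the input dimensions are not known in advance, a short sketch of letting the model define them, based on the -1 convention described above (using the `inputTensorInfo` variable from the initialize example):

```c++
// Assumption: setting -1 tells initialize() to read these sizes from the model
inputTensorInfo.tensorDims.batch   = -1;
inputTensorInfo.tensorDims.width   = -1;
inputTensorInfo.tensorDims.height  = -1;
inputTensorInfo.tensorDims.channel = -1;
```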
```c++
enum {
    DATA_TYPE_IMAGE,
    DATA_TYPE_BLOB_NHWC,    // data for which preprocessing (color conversion, resize, normalization, etc.) is already done
    DATA_TYPE_BLOB_NCHW,
};

void* data;             // [In] Set the pointer to the image/blob
int32_t dataType;       // [In] Set the type of the data (e.g. DATA_TYPE_IMAGE)
struct {
    int32_t width;
    int32_t height;
    int32_t channel;
    int32_t cropX;
    int32_t cropY;
    int32_t cropWidth;
    int32_t cropHeight;
    bool isBGR;         // used when channel == 3 (true: BGR, false: RGB)
    bool swapColor;
} imageInfo;            // [In] used when dataType == DATA_TYPE_IMAGE
struct {
    float mean[3];
    float norm[3];
} normalize;            // [In] used when dataType == DATA_TYPE_IMAGE
```
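For reference, a sketch of how `mean`/`norm` are commonly interpreted with values like the ImageNet statistics used in the initialize example above. This assumes pixel values are scaled to [0, 1] before normalization; it is an assumption for illustration, not a statement of the library's exact implementation:

```c++
// Hypothetical per-channel normalization of a pixel value src in [0, 255]
// (assumed formula): dst = (src / 255 - mean) / norm
float normalizePixel(uint8_t src, float mean, float norm)
{
    return (src / 255.0f - mean) / norm;
}
```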
```c++
void* data;             // [Out] Pointer to the output data
struct {
    float scale;
    uint8_t zeroPoint;
} quant;                // [Out] Parameters for dequantization (convert uint8 to float)
```
- Get the output data in the form of FP32
- When the tensor type is UINT8 (quantized), the data is dequantized and converted to FP32

```c++
const float* valFloat = outputTensorList[0].getDataAsFloat();
```
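For reference, a sketch of what the dequantization amounts to, using the `quant` parameters described above. This is the standard affine dequantization formula and is assumed (not confirmed) to match what `getDataAsFloat` does internally:

```c++
// Manual dequantization equivalent (illustrative sketch):
// float_value = scale * (quantized_value - zeroPoint)
const OutputTensorInfo& out = outputTensorList[0];
const uint8_t* valUint8 = static_cast<const uint8_t*>(out.data);
float firstValue = out.quant.scale * (static_cast<int32_t>(valUint8[0]) - out.quant.zeroPoint);
```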
- InferenceHelper
- https://github.com/iwatake2222/InferenceHelper
- Copyright 2020 iwatake2222
- Licensed under the Apache License, Version 2.0
- This project utilizes OSS (Open Source Software)