From 71cc98764a9918cdd4c935aa2c44389d8d675276 Mon Sep 17 00:00:00 2001 From: Hemal Shah Date: Thu, 19 Oct 2023 02:22:26 +0000 Subject: [PATCH] Isaac ROS 2.0.0 --- README.md | 322 +---- docs/tutorial-ess-realsense.md | 34 - docs/tutorial-isaac-sim.md | 47 - docs/tutorial-nitros-graph.md | 45 - docs/visualize-image.md | 37 - isaac_ros_ess/CMakeLists.txt | 15 +- isaac_ros_ess/config/ess_inference.yaml | 77 +- .../config/isaac_ros_ess_isaac_sim.rviz | 28 +- isaac_ros_ess/config/isaac_ros_ess_zed.rviz | 197 +++ isaac_ros_ess/config/realsense.yaml | 2 +- isaac_ros_ess/config/zed.yaml | 163 +++ .../gxf/ess/3dv/include/cv/ess/ESS.h | 203 --- isaac_ros_ess/gxf/ess/3dv/src/ESS.cpp | 230 ---- .../gxf/ess/3dv/src/ESSPostProcess.cpp | 129 -- .../gxf/ess/3dv/src/ESSPreProcess.cpp | 190 --- isaac_ros_ess/gxf/ess/CMakeLists.txt | 142 +- isaac_ros_ess/gxf/ess/ESS.cpp | 325 ----- isaac_ros_ess/gxf/ess/TensorOps.cpp | 95 -- .../gxf/ess/cvcore/include/cv/core/Array.h | 386 ------ .../gxf/ess/cvcore/include/cv/core/BBox.h | 142 -- .../gxf/ess/cvcore/include/cv/core/CVError.h | 116 -- .../ess/cvcore/include/cv/core/CameraModel.h | 292 ---- .../cvcore/include/cv/core/ComputeEngine.h | 43 - .../gxf/ess/cvcore/include/cv/core/Core.h | 35 - .../gxf/ess/cvcore/include/cv/core/Image.h | 893 ------------- .../cvcore/include/cv/core/Instrumentation.h | 65 - .../ess/cvcore/include/cv/core/MathTypes.h | 234 ---- .../gxf/ess/cvcore/include/cv/core/Memory.h | 135 -- .../gxf/ess/cvcore/include/cv/core/Model.h | 50 - .../ess/cvcore/include/cv/core/ProfileUtils.h | 40 - .../gxf/ess/cvcore/include/cv/core/Tensor.h | 1189 ----------------- .../ess/cvcore/include/cv/core/TensorList.h | 37 - .../ess/cvcore/include/cv/core/TensorMap.h | 534 -------- .../gxf/ess/cvcore/include/cv/core/Traits.h | 478 ------- .../cvcore/include/cv/tensor_ops/BBoxUtils.h | 135 -- .../ess/cvcore/include/cv/tensor_ops/DBScan.h | 91 -- .../ess/cvcore/include/cv/tensor_ops/Errors.h | 49 - .../cvcore/include/cv/tensor_ops/IImageWarp.h | 63 - .../cv/tensor_ops/ITensorOperatorContext.h | 65 - .../cv/tensor_ops/ITensorOperatorStream.h | 251 ---- .../cvcore/include/cv/tensor_ops/ImageUtils.h | 1091 --------------- .../include/cv/tensor_ops/OneEuroFilter.h | 82 -- .../include/cv/tensor_ops/TensorOperators.h | 48 - .../cvcore/include/cv/trtbackend/TRTBackend.h | 203 --- .../gxf/ess/cvcore/src/core/cvcore/Array.cpp | 145 -- .../gxf/ess/cvcore/src/core/cvcore/Dummy.cu | 0 .../ess/cvcore/src/core/cvcore/MathTypes.cpp | 244 ---- .../gxf/ess/cvcore/src/core/cvcore/Tensor.cpp | 270 ---- .../ess/cvcore/src/core/utility/CVError.cpp | 123 -- .../src/core/utility/Instrumentation.cpp | 95 -- .../ess/cvcore/src/core/utility/Memory.cpp | 124 -- .../cvcore/src/core/utility/ProfileUtils.cpp | 128 -- .../tensorrt/TensorRTInferencer.cpp | 275 ---- .../src/inferencer/tensorrt/TensorRTUtils.cpp | 64 - .../src/inferencer/triton/TritonUtils.cpp | 84 -- .../src/tensor_ops/ArithmeticOperations.cpp | 329 ----- .../ess/cvcore/src/tensor_ops/BBoxUtils.cpp | 173 --- .../src/tensor_ops/ColorConversions.cpp | 447 ------- .../gxf/ess/cvcore/src/tensor_ops/DBScan.cpp | 214 --- .../gxf/ess/cvcore/src/tensor_ops/Errors.cpp | 104 -- .../gxf/ess/cvcore/src/tensor_ops/Filters.cpp | 112 -- .../gxf/ess/cvcore/src/tensor_ops/Filters.h | 105 -- .../cvcore/src/tensor_ops/FusedOperations.cpp | 261 ---- .../src/tensor_ops/GeometryTransforms.cpp | 754 ----------- .../ess/cvcore/src/tensor_ops/NppUtils.cpp | 116 -- .../cvcore/src/tensor_ops/OneEuroFilter.cpp | 288 ---- 
.../cvcore/src/tensor_ops/TensorOperators.cpp | 116 -- .../tensor_ops/vpi/VPIColorConvertImpl.cpp | 135 -- .../src/tensor_ops/vpi/VPIColorConvertImpl.h | 65 - .../src/tensor_ops/vpi/VPIEnumMapping.h | 196 --- .../src/tensor_ops/vpi/VPIRemapImpl.cpp | 160 --- .../cvcore/src/tensor_ops/vpi/VPIRemapImpl.h | 82 -- .../src/tensor_ops/vpi/VPIResizeImpl.cpp | 139 -- .../cvcore/src/tensor_ops/vpi/VPIResizeImpl.h | 66 - .../src/tensor_ops/vpi/VPIStatusMapping.cpp | 122 -- .../src/tensor_ops/vpi/VPIStatusMapping.h | 38 - .../vpi/VPIStereoDisparityEstimatorImpl.cpp | 211 --- .../vpi/VPIStereoDisparityEstimatorImpl.h | 83 -- .../src/tensor_ops/vpi/VPITensorOperators.cpp | 710 ---------- .../src/tensor_ops/vpi/VPITensorOperators.h | 272 ---- .../ess/cvcore/src/trtbackend/TRTBackend.cpp | 634 --------- .../ess/components/ess_inference.cpp | 573 ++++++++ .../{ESS.hpp => components/ess_inference.hpp} | 56 +- .../VPIImageWarp.h => extensions/ess/ess.cpp} | 28 +- .../gxf/ess/extensions/ess/inference/ESS.cpp | 267 ++++ .../gxf/ess/extensions/ess/inference/ESS.h | 276 ++++ .../ess/inference/ESSPostProcess.cpp | 118 ++ .../ess/inference/ESSPreProcess.cpp | 190 +++ .../ess/extensions/tensor_ops/CameraModel.cpp | 86 -- .../ess/extensions/tensor_ops/CameraModel.hpp | 60 - .../tensor_ops/ConvertColorFormat.cpp | 214 --- .../tensor_ops/ConvertColorFormat.hpp | 51 - .../extensions/tensor_ops/CropAndResize.cpp | 161 --- .../extensions/tensor_ops/CropAndResize.hpp | 53 - .../gxf/ess/extensions/tensor_ops/Frame3D.cpp | 56 - .../gxf/ess/extensions/tensor_ops/Frame3D.hpp | 53 - .../extensions/tensor_ops/ImageAdapter.cpp | 78 -- .../extensions/tensor_ops/ImageAdapter.hpp | 101 -- .../ess/extensions/tensor_ops/ImageUtils.cpp | 175 --- .../ess/extensions/tensor_ops/ImageUtils.hpp | 65 - .../tensor_ops/InterleavedToPlanar.cpp | 146 -- .../tensor_ops/InterleavedToPlanar.hpp | 45 - .../ess/extensions/tensor_ops/Normalize.cpp | 183 --- .../ess/extensions/tensor_ops/Normalize.hpp | 47 - .../gxf/ess/extensions/tensor_ops/Reshape.cpp | 98 -- .../gxf/ess/extensions/tensor_ops/Reshape.hpp | 49 - .../gxf/ess/extensions/tensor_ops/Resize.cpp | 194 --- .../gxf/ess/extensions/tensor_ops/Resize.hpp | 55 - .../extensions/tensor_ops/TensorOperator.cpp | 235 ---- .../extensions/tensor_ops/TensorOperator.hpp | 95 -- .../extensions/tensor_ops/TensorStream.cpp | 124 -- .../extensions/tensor_ops/TensorStream.hpp | 59 - .../ess/extensions/tensor_ops/Undistort.cpp | 285 ---- .../ess/extensions/tensor_ops/Undistort.hpp | 69 - .../detail/ImageAdapterTensorImpl.cpp | 105 -- .../detail/ImageAdapterTensorImpl.hpp | 105 -- .../detail/ImageAdapterVideoBufferImpl.cpp | 88 -- .../detail/ImageAdapterVideoBufferImpl.hpp | 294 ---- .../dnn_inferencer}/inferencer/Errors.cpp | 45 +- .../dnn_inferencer}/inferencer/Errors.h | 28 +- .../inferencer/IInferenceBackend.h | 97 +- .../dnn_inferencer}/inferencer/Inferencer.cpp | 81 +- .../dnn_inferencer}/inferencer/Inferencer.h | 49 +- .../inferencer/TensorRTInferencer.cpp | 387 ++++++ .../inferencer}/TensorRTInferencer.h | 62 +- .../inferencer/TensorRTUtils.cpp | 57 + .../inferencer}/TensorRTUtils.h | 24 +- .../inferencer/TritionUtils.cpp | 64 + .../inferencer}/TritonGrpcInferencer.cpp | 203 ++- .../inferencer}/TritonGrpcInferencer.h | 51 +- .../dnn_inferencer/inferencer}/TritonUtils.h | 20 +- .../gxf_helpers/common_expected_macro.hpp | 305 +++++ .../gxf/ess/gems/gxf_helpers/element_type.hpp | 55 + .../ess/gems/gxf_helpers/expected_macro.hpp | 38 + .../gxf/ess/gems/gxf_helpers/tensor.hpp | 137 ++ 
isaac_ros_ess/gxf/ess/gems/hash/hash_file.cpp | 71 + .../hash/hash_file.hpp} | 14 +- isaac_ros_ess/gxf/ess/gems/hash/sha256.cpp | 237 ++++ isaac_ros_ess/gxf/ess/gems/hash/sha256.hpp | 80 ++ .../gxf/ess/gems/video_buffer/allocator.hpp | 276 ++++ isaac_ros_ess/gxf/thresholder/CMakeLists.txt | 61 + .../components/video_buffer_thresholder.cpp | 237 ++++ .../components/video_buffer_thresholder.hpp | 71 + .../gems/video_buffer_thresholder.cu | 101 ++ .../gems/video_buffer_thresholder.cu.hpp} | 26 +- .../video_buffer_thresholder_extension.cpp | 34 + .../isaac_ros_ess/ess_disparity_node.hpp | 6 +- isaac_ros_ess/isaac_ros_ess/__init__.py | 16 + .../launch/isaac_ros_argus_ess.launch.py | 62 +- isaac_ros_ess/launch/isaac_ros_ess.launch.py | 12 +- .../launch/isaac_ros_ess_isaac_sim.launch.py | 61 +- .../launch/isaac_ros_ess_realsense.launch.py | 64 +- .../launch/isaac_ros_ess_zed.launch.py | 205 +++ isaac_ros_ess/package.xml | 3 +- .../scripts/isaac_ros_ess_visualizer.py | 8 +- isaac_ros_ess/src/ess_disparity_node.cpp | 31 +- isaac_ros_ess/test/dummy_model.onnx | 4 +- isaac_ros_ess/test/dummy_model_480x288.onnx | 3 + isaac_ros_ess/test/isaac_ros_ess_test.py | 24 +- .../test/isaac_ros_ess_test_1_16HD_model.py | 167 +++ resources/Isaac_sim_enable_stereo.png | 3 - resources/Isaac_sim_play.png | 3 - resources/Isaac_sim_set_stereo_offset.png | 3 - resources/Rviz.png | 3 - resources/Visualizer_isaac_sim.png | 3 - resources/isaac_ros_ess_nodegraph.png | 3 - resources/output_raw.jpeg | Bin 87772 -> 0 bytes resources/output_rosbag.png | 3 - resources/realsense_example.png | 3 - resources/warehouse.gif | 3 - 170 files changed, 5165 insertions(+), 19888 deletions(-) delete mode 100644 docs/tutorial-ess-realsense.md delete mode 100644 docs/tutorial-isaac-sim.md delete mode 100644 docs/tutorial-nitros-graph.md delete mode 100644 docs/visualize-image.md create mode 100644 isaac_ros_ess/config/isaac_ros_ess_zed.rviz create mode 100644 isaac_ros_ess/config/zed.yaml delete mode 100644 isaac_ros_ess/gxf/ess/3dv/include/cv/ess/ESS.h delete mode 100644 isaac_ros_ess/gxf/ess/3dv/src/ESS.cpp delete mode 100644 isaac_ros_ess/gxf/ess/3dv/src/ESSPostProcess.cpp delete mode 100644 isaac_ros_ess/gxf/ess/3dv/src/ESSPreProcess.cpp delete mode 100644 isaac_ros_ess/gxf/ess/ESS.cpp delete mode 100644 isaac_ros_ess/gxf/ess/TensorOps.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Array.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/BBox.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/CVError.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/CameraModel.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/ComputeEngine.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Core.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Image.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Instrumentation.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/MathTypes.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Memory.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Model.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/ProfileUtils.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Tensor.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/TensorList.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/TensorMap.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Traits.h delete mode 100644 
isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/BBoxUtils.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/DBScan.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/Errors.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/IImageWarp.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/ITensorOperatorContext.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/ITensorOperatorStream.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/ImageUtils.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/OneEuroFilter.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/TensorOperators.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/include/cv/trtbackend/TRTBackend.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/Array.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/Dummy.cu delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/MathTypes.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/Tensor.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/core/utility/CVError.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/core/utility/Instrumentation.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/core/utility/Memory.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/core/utility/ProfileUtils.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTInferencer.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTUtils.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/inferencer/triton/TritonUtils.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/ArithmeticOperations.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/BBoxUtils.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/ColorConversions.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/DBScan.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/Errors.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/Filters.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/Filters.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/FusedOperations.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/GeometryTransforms.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/NppUtils.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/OneEuroFilter.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/TensorOperators.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIColorConvertImpl.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIColorConvertImpl.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIEnumMapping.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIRemapImpl.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIRemapImpl.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIResizeImpl.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIResizeImpl.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStatusMapping.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStatusMapping.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStereoDisparityEstimatorImpl.cpp 
delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStereoDisparityEstimatorImpl.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPITensorOperators.cpp delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPITensorOperators.h delete mode 100644 isaac_ros_ess/gxf/ess/cvcore/src/trtbackend/TRTBackend.cpp create mode 100644 isaac_ros_ess/gxf/ess/extensions/ess/components/ess_inference.cpp rename isaac_ros_ess/gxf/ess/extensions/ess/{ESS.hpp => components/ess_inference.hpp} (76%) rename isaac_ros_ess/gxf/ess/{cvcore/src/tensor_ops/vpi/VPIImageWarp.h => extensions/ess/ess.cpp} (52%) create mode 100644 isaac_ros_ess/gxf/ess/extensions/ess/inference/ESS.cpp create mode 100644 isaac_ros_ess/gxf/ess/extensions/ess/inference/ESS.h create mode 100644 isaac_ros_ess/gxf/ess/extensions/ess/inference/ESSPostProcess.cpp create mode 100644 isaac_ros_ess/gxf/ess/extensions/ess/inference/ESSPreProcess.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/CameraModel.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/CameraModel.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/ConvertColorFormat.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/ConvertColorFormat.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/CropAndResize.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/CropAndResize.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/Frame3D.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/Frame3D.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageAdapter.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageAdapter.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageUtils.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageUtils.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/InterleavedToPlanar.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/InterleavedToPlanar.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/Normalize.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/Normalize.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/Reshape.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/Reshape.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/Resize.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/Resize.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorOperator.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorOperator.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorStream.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorStream.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/Undistort.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/Undistort.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterTensorImpl.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterTensorImpl.hpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterVideoBufferImpl.cpp delete mode 100644 isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterVideoBufferImpl.hpp rename isaac_ros_ess/gxf/ess/{cvcore/src => gems/dnn_inferencer}/inferencer/Errors.cpp (86%) rename isaac_ros_ess/gxf/ess/{cvcore/include/cv 
=> gems/dnn_inferencer}/inferencer/Errors.h (77%) rename isaac_ros_ess/gxf/ess/{cvcore/include/cv => gems/dnn_inferencer}/inferencer/IInferenceBackend.h (71%) rename isaac_ros_ess/gxf/ess/{cvcore/src => gems/dnn_inferencer}/inferencer/Inferencer.cpp (69%) rename isaac_ros_ess/gxf/ess/{cvcore/include/cv => gems/dnn_inferencer}/inferencer/Inferencer.h (69%) create mode 100644 isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTInferencer.cpp rename isaac_ros_ess/gxf/ess/{cvcore/src/inferencer/tensorrt => gems/dnn_inferencer/inferencer}/TensorRTInferencer.h (54%) create mode 100644 isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTUtils.cpp rename isaac_ros_ess/gxf/ess/{cvcore/src/inferencer/tensorrt => gems/dnn_inferencer/inferencer}/TensorRTUtils.h (69%) create mode 100644 isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TritionUtils.cpp rename isaac_ros_ess/gxf/ess/{cvcore/src/inferencer/triton => gems/dnn_inferencer/inferencer}/TritonGrpcInferencer.cpp (74%) rename isaac_ros_ess/gxf/ess/{cvcore/src/inferencer/triton => gems/dnn_inferencer/inferencer}/TritonGrpcInferencer.h (60%) rename isaac_ros_ess/gxf/ess/{cvcore/src/inferencer/triton => gems/dnn_inferencer/inferencer}/TritonUtils.h (81%) create mode 100644 isaac_ros_ess/gxf/ess/gems/gxf_helpers/common_expected_macro.hpp create mode 100644 isaac_ros_ess/gxf/ess/gems/gxf_helpers/element_type.hpp create mode 100644 isaac_ros_ess/gxf/ess/gems/gxf_helpers/expected_macro.hpp create mode 100644 isaac_ros_ess/gxf/ess/gems/gxf_helpers/tensor.hpp create mode 100644 isaac_ros_ess/gxf/ess/gems/hash/hash_file.cpp rename isaac_ros_ess/gxf/ess/{cvcore/src/tensor_ops/IImageWarp.cpp => gems/hash/hash_file.hpp} (68%) create mode 100644 isaac_ros_ess/gxf/ess/gems/hash/sha256.cpp create mode 100644 isaac_ros_ess/gxf/ess/gems/hash/sha256.hpp create mode 100644 isaac_ros_ess/gxf/ess/gems/video_buffer/allocator.hpp create mode 100644 isaac_ros_ess/gxf/thresholder/CMakeLists.txt create mode 100644 isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/components/video_buffer_thresholder.cpp create mode 100644 isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/components/video_buffer_thresholder.hpp create mode 100644 isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/gems/video_buffer_thresholder.cu rename isaac_ros_ess/gxf/{ess/cvcore/src/tensor_ops/NppUtils.h => thresholder/extensions/video_buffer_utils/gems/video_buffer_thresholder.cu.hpp} (50%) create mode 100644 isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/video_buffer_thresholder_extension.cpp create mode 100644 isaac_ros_ess/launch/isaac_ros_ess_zed.launch.py create mode 100644 isaac_ros_ess/test/dummy_model_480x288.onnx create mode 100644 isaac_ros_ess/test/isaac_ros_ess_test_1_16HD_model.py delete mode 100644 resources/Isaac_sim_enable_stereo.png delete mode 100644 resources/Isaac_sim_play.png delete mode 100644 resources/Isaac_sim_set_stereo_offset.png delete mode 100644 resources/Rviz.png delete mode 100644 resources/Visualizer_isaac_sim.png delete mode 100644 resources/isaac_ros_ess_nodegraph.png delete mode 100644 resources/output_raw.jpeg delete mode 100644 resources/output_rosbag.png delete mode 100644 resources/realsense_example.png delete mode 100644 resources/warehouse.gif diff --git a/README.md b/README.md index 4132929..79c04a8 100644 --- a/README.md +++ b/README.md @@ -1,310 +1,68 @@ -# Isaac ROS DNN Stereo Disparity +# Isaac ROS DNN Stereo Depth -DNN Stereo Disparity includes packages for predicting disparity of stereo input. 
+Hardware-accelerated, deep-learned stereo disparity estimation -
+
image
--- ## Webinar Available -Learn how to use this package by watching our on-demand webinar: [Using ML Models in ROS 2 to Robustly Estimate Distance to Obstacles](https://gateway.on24.com/wcc/experience/elitenvidiabrill/1407606/3998202/isaac-ros-webinar-series) +Learn how to use this package by watching our on-demand webinar: +[Using ML Models in ROS 2 to Robustly Estimate Distance to Obstacles](https://gateway.on24.com/wcc/experience/elitenvidiabrill/1407606/3998202/isaac-ros-webinar-series) --- ## Overview -Isaac ROS DNN Disparity provides a GPU-accelerated package for DNN-based stereo disparity. Stereo disparity is calculated from a time-synchronized image pair sourced from a stereo camera and is used to produce a depth image or a point cloud of a scene. The `isaac_ros_ess` package uses the [ESS DNN model](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/isaac/models/dnn_stereo_disparity) to perform stereo depth estimation via continuous disparity prediction. Given a pair of stereo input images, the package generates a disparity map of the left input image. +[Isaac ROS DNN Stereo Depth](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_dnn_stereo_depth) provides a GPU-accelerated package for DNN-based +stereo disparity. Stereo disparity is calculated from a +time-synchronized image pair sourced from a stereo camera and is used to +produce a depth image or a point cloud for a scene. The `isaac_ros_ess` +package uses the [ESS DNN +model](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/isaac/models/dnn_stereo_disparity) +to perform stereo depth estimation via continuous disparity prediction. +Given a pair of stereo input images, the package generates a disparity +map of the left input image. -
+
image
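A minimal sketch (not part of this patch) of the disparity-to-depth relationship described in this README, assuming the standard `stereo_msgs/DisparityImage` fields `image`, `f` (focal length in pixels), and `t` (baseline in meters) published on the `disparity` topic, and treating non-positive disparities as invalid:

```python
import numpy as np
from cv_bridge import CvBridge
from stereo_msgs.msg import DisparityImage

bridge = CvBridge()

def disparity_to_depth(msg: DisparityImage) -> np.ndarray:
    """Convert a DisparityImage from the `disparity` topic into a depth map in meters."""
    # disparity = focal_length * baseline / depth, so depth = f * t / disparity.
    disparity = bridge.imgmsg_to_cv2(msg.image, desired_encoding='32FC1')
    depth = np.zeros_like(disparity, dtype=np.float32)
    valid = disparity > 0.0  # low-confidence pixels may be filled with -1.0
    depth[valid] = (msg.f * msg.t) / disparity[valid]
    return depth
```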
-ESS is used in a graph of nodes to provide a disparity prediction from an input left and right stereo image pair. Images to ESS need to be rectified and resized to the appropriate input resolution. The aspect ratio of the image needs to be maintained, so the image may need to be cropped and resized to maintain the input aspect ratio. The graph for DNN encode, to DNN inference, to DNN decode is part of the ESS node. Inference is performed using TensorRT, as the ESS DNN model is designed to use optimizations supported by TensorRT. +ESS is used in a graph of nodes to provide a disparity prediction from an input left and right stereo image pair. +Images to ESS need to be rectified and resized to the appropriate input resolution. +The aspect ratio of the image is recommended to be maintained, so the image may need to be cropped and resized to maintain the input aspect ratio. +The graph for DNN encode, DNN inference, and DNN decode is included in the ESS node. +Inference is performed using TensorRT, as the ESS DNN model is designed with optimizations supported by TensorRT. +ESS node is agnostic to the model dimension and disparity output has the same dimension as the ESS model. -### ESS DNN - -[ESS](https://arxiv.org/pdf/1803.09719.pdf) stands for Enhanced Semi-Supervised stereo disparity, developed by NVIDIA. The ESS DNN is used to predict the disparity for each pixel from stereo camera image pairs. This network has improvements over classic CV approaches that use epipolar geometry to compute disparity, as the DNN can learn to predict disparity in cases where epipolar geometry feature matching fails. The semi-supervised learning and stereo disparity matching makes the ESS DNN robust in environments unseen in the training datasets and with occluded objects. This DNN is optimized for and evaluated with color (RGB) global shutter stereo camera images, and accuracy may vary with monochrome stereo images used in analytic computer vision approaches to stereo disparity. - -The predicted [disparity](https://en.wikipedia.org/wiki/Binocular_disparity) values represent the distance a point moves from one image to the other in a stereo image pair (a.k.a. the binocular image pair). The disparity is inversely proportional to the depth (i.e. `disparity = focalLength x baseline / depth`). Given the [focal length](https://en.wikipedia.org/wiki/Focal_length) and [baseline](https://en.wikipedia.org/wiki/Stereo_camera) of the camera that generates a stereo image pair, the predicted disparity map from the `isaac_ros_ess` package can be used to compute depth and generate a [point cloud](https://en.wikipedia.org/wiki/Point_cloud). - -> **Note**: Compare the requirements of your use case against the package [input limitations](#input-restrictions). - -### Isaac ROS NITROS Acceleration +## Isaac ROS NITROS Acceleration This package is powered by [NVIDIA Isaac Transport for ROS (NITROS)](https://developer.nvidia.com/blog/improve-perception-performance-for-ros-2-applications-with-nvidia-isaac-transport-for-ros/), which leverages type adaptation and negotiation to optimize message formats and dramatically accelerate communication between participating nodes. ## Performance -The following table summarizes the per-platform performance statistics of sample graphs that use this package, with links included to the full benchmark output. 
These benchmark configurations are taken from the [Isaac ROS Benchmark](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark#list-of-isaac-ros-benchmarks) collection, based on the [`ros2_benchmark`](https://github.com/NVIDIA-ISAAC-ROS/ros2_benchmark) framework.
-
-| Sample Graph | Input Size | AGX Orin | Orin NX | x86_64 w/ RTX 4060 Ti |
-| --- | --- | --- | --- | --- |
-| [DNN Stereo Disparity Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_ess_node.py) | 1080p | [63.6 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_node-agx_orin.json)<br/>3.5 ms | [24.8 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_node-orin_nx.json)<br/>5.0 ms | [173 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_node-nuc_4060ti.json)<br/>3.4 ms |
-| [DNN Stereo Disparity Graph](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts//isaac_ros_ess_graph.py) | 1080p | [52.7 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_graph-agx_orin.json)<br/>21 ms | [20.8 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_graph-orin_nx.json)<br/>50 ms | [156 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_graph-nuc_4060ti.json)<br/>
10 ms | - - -## Table of Contents - -- [Isaac ROS DNN Stereo Disparity](#isaac-ros-dnn-stereo-disparity) - - [Webinar Available](#webinar-available) - - [Overview](#overview) - - [ESS DNN](#ess-dnn) - - [Isaac ROS NITROS Acceleration](#isaac-ros-nitros-acceleration) - - [Performance](#performance) - - [Table of Contents](#table-of-contents) - - [Latest Update](#latest-update) - - [Supported Platforms](#supported-platforms) - - [Docker](#docker) - - [Quickstart](#quickstart) - - [Next Steps](#next-steps) - - [Try Advanced Examples](#try-advanced-examples) - - [Try NITROS-Accelerated Graph with Argus](#try-nitros-accelerated-graph-with-argus) - - [Use Different Models](#use-different-models) - - [Customize Your Dev Environment](#customize-your-dev-environment) - - [Package Reference](#package-reference) - - [`isaac_ros_ess`](#isaac_ros_ess) - - [Overview](#overview-1) - - [Usage](#usage) - - [ROS Parameters](#ros-parameters) - - [ROS Topics Subscribed](#ros-topics-subscribed) - - [ROS Topics Published](#ros-topics-published) - - [Input Restrictions](#input-restrictions) - - [Output Interpretations](#output-interpretations) - - [Troubleshooting](#troubleshooting) - - [Isaac ROS Troubleshooting](#isaac-ros-troubleshooting) - - [Deep Learning Troubleshooting](#deep-learning-troubleshooting) - - [Package not found while launching the visualizer script](#package-not-found-while-launching-the-visualizer-script) - - [Symptom](#symptom) - - [Solution](#solution) - - [Problem reserving CacheChange in reader](#problem-reserving-cachechange-in-reader) - - [Symptom](#symptom-1) - - [Solution](#solution-1) - - [Updates](#updates) - -## Latest Update - -Update 2023-05-25: Upgraded model (1.1.0). - -## Supported Platforms - -This package is designed and tested to be compatible with ROS 2 Humble running on [Jetson](https://developer.nvidia.com/embedded-computing) or an x86_64 system with an NVIDIA GPU. - -> **Note**: Versions of ROS 2 earlier than Humble are **not** supported. This package depends on specific ROS 2 implementation features that were only introduced beginning with the Humble release. - -| Platform | Hardware | Software | Notes | -| -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Jetson | [AGX Orin](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/jetson-orin/)
[Orin Nano](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/jetson-agx-xavier/) | [JetPack 5.1.1](https://developer.nvidia.com/embedded/jetpack) | For best performance, ensure that [power settings](https://docs.nvidia.com/jetson/archives/r34.1/DeveloperGuide/text/SD/PlatformPowerAndPerformance.html) are configured appropriately. | -| x86_64 | NVIDIA GPU | [Ubuntu 20.04+](https://releases.ubuntu.com/20.04/)
[CUDA 11.8+](https://developer.nvidia.com/cuda-downloads) | - -### Docker - -To simplify development, we strongly recommend leveraging the Isaac ROS Dev Docker images by following [these steps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_common/blob/main/docs/dev-env-setup.md). This will streamline your development environment setup with the correct versions of dependencies on both Jetson and x86_64 platforms. - -> **Note**: All Isaac ROS quick start guides, tutorials, and examples have been designed with the Isaac ROS Docker images as a prerequisite. - -## Quickstart - -1. Set up your development environment by following the instructions [here](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_common/blob/main/docs/dev-env-setup.md). - -2. Clone this repository and its dependencies under `~/workspaces/isaac_ros-dev/src`: - - ```bash - mkdir -p ~/workspaces/isaac_ros-dev/src && cd ~/workspaces/isaac_ros-dev/src - ``` - - ```bash - git clone https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_common - ``` - - ```bash - git clone https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_nitros - ``` - - ```bash - git clone https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_image_pipeline - ``` - - ```bash - git clone https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_dnn_stereo_disparity - ``` - -3. Pull down a ROS Bag of sample data: - - ```bash - cd ~/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity && \ - git lfs pull -X "" -I "resources/rosbags/ess_rosbag" - ``` - -4. Launch the Docker container using the `run_dev.sh` script: - - ```bash - cd ~/workspaces/isaac_ros-dev/src/isaac_ros_common && ./scripts/run_dev.sh - ``` - -5. Download the pre-trained ESS model from the [ESS model page](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/isaac/models/dnn_stereo_disparity): - - ```bash - cd /workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity/resources && \ - wget 'https://api.ngc.nvidia.com/v2/models/nvidia/isaac/dnn_stereo_disparity/versions/1.1.0/files/ess.etlt' - ``` - -6. Convert the encrypted model (`.etlt`) to a TensorRT engine plan: - - ```bash - /opt/nvidia/tao/tao-converter -k ess -t fp16 -e /workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity/resources/ess.engine -o output_left /workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity/resources/ess.etlt - ``` - -7. Build and source the workspace: - - ```bash - cd /workspaces/isaac_ros-dev && \ - colcon build && \ - source install/setup.bash - ``` - - **Note**: We recommend rebuilding the package each time the source files are edited. Before rebuilding, first clean the workspace by running `rm -r build install log`. - -8. (Optional) Run tests to verify complete and correct installation: - - ```bash - colcon test --executor sequential - ``` - -9. Launch the ESS Disparity Node: - - ```bash - ros2 launch isaac_ros_ess isaac_ros_ess.launch.py engine_file_path:=/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity/resources/ess.engine - ``` - -10. Open a **second terminal** and attach to the container: - - ```bash - cd ~/workspaces/isaac_ros-dev/src/isaac_ros_common && \ - ./scripts/run_dev.sh && \ - source install/setup.bash - ``` - -11. In the **second terminal**, visualize and validate the output of the package: +| Sample Graph

| Input Size | AGX Orin | Orin NX | x86_64 w/ RTX 4060 Ti |
+| --- | --- | --- | --- | --- |
+| [DNN Stereo Disparity Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts/isaac_ros_ess_node.py)<br/>Full | 576p | [78.8 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_node-agx_orin.json)<br/>4.1 ms | [27.2 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_node-orin_nx.json)<br/>6.2 ms | [204 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_node-nuc_4060ti.json)<br/>4.4 ms |
+| [DNN Stereo Disparity Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts/isaac_ros_light_ess_node.py)<br/>Light | 288p | [288 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_node-agx_orin.json)<br/>5.6 ms | [128 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_node-orin_nx.json)<br/>5.6 ms | [350 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_node-nuc_4060ti.json)<br/>4.2 ms |
+| [DNN Stereo Disparity Graph](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts/isaac_ros_ess_graph.py)<br/>Full | 576p | [74.0 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_graph-agx_orin.json)<br/>20 ms | [26.1 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_graph-orin_nx.json)<br/>42 ms | [191 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_graph-nuc_4060ti.json)<br/>11 ms |
+| [DNN Stereo Disparity Graph](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/scripts/isaac_ros_light_ess_graph.py)<br/>Light | 288p | [260 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_graph-agx_orin.json)<br/>13 ms | [116 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_graph-orin_nx.json)<br/>16 ms | [350 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_graph-nuc_4060ti.json)<br/>12 ms |
-    ```bash
-    ros2 run isaac_ros_ess isaac_ros_ess_visualizer.py --enable_rosbag
-    ```
-
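The ESS 3.0 update in this patch also publishes a confidence map and masks low-confidence disparities before they leave the graph: the `ess_inference.yaml` changes further below route the new `confidence_output` through a `VideoBufferThresholder` configured with `threshold: 0.9` and `fill_value_float: -1.0`. A minimal NumPy sketch of that masking step (illustrative only, not the GXF implementation):

```python
import numpy as np

def threshold_disparity(disparity: np.ndarray, confidence: np.ndarray,
                        threshold: float = 0.9, fill_value: float = -1.0) -> np.ndarray:
    """Replace disparity pixels whose confidence falls below the threshold."""
    # Mirrors the VideoBufferThresholder parameters added in ess_inference.yaml:
    # pixels with confidence < 0.9 are set to -1.0 so downstream consumers can drop them.
    masked = disparity.copy()
    masked[confidence < threshold] = fill_value
    return masked
```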
- -## Next Steps - -### Try Advanced Examples - -To continue exploring the DNN Stereo Disparity package, check out the following suggested examples: - -- [Generating disparity maps from a stereo pair of image files](./docs/visualize-image.md) -- [Tutorial with RealSense and isaac_ros_ess](./docs/tutorial-ess-realsense.md) -- [Tutorial with Isaac Sim](./docs/tutorial-isaac-sim.md) - -### Try NITROS-Accelerated Graph with Argus - -If you have an Argus-compatible camera, you can launch the NITROS-accelerated graph by following the [tutorial](docs/tutorial-nitros-graph.md). - -### Use Different Models - -Click [here](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_dnn_inference/blob/main/docs/model-preparation.md) for more information about how to use NGC models. - -### Customize Your Dev Environment - -To customize your development environment, reference [this guide](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_common/blob/main/docs/modify-dockerfile.md). - -## Package Reference - -### `isaac_ros_ess` - -#### Overview - -The `isaac_ros_ess` package offers functionality to generate a stereo disparity map from stereo images using a trained ESS model. Given a pair of stereo input images, the package generates a continuous disparity image for the left input image. - -#### Usage - -```bash -ros2 launch isaac_ros_ess isaac_ros_ess.launch.py engine_file_path:= -``` - -#### ROS Parameters - -| ROS Parameter | Type | Default | Description | -| ------------------ | -------- | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `engine_file_path` | `string` | N/A - Required | The absolute path to the ESS engine file. | -| `image_type` | `string` | `"RGB_U8"`. | The input image encoding type. Supports `"RGB_U8"` and `"BGR_U8"`.
Note that if this parameter is not specified and there is an upstream Isaac ROS NITROS node, the type will be decided automatically. | - -#### ROS Topics Subscribed - -| ROS Topic | Interface | Description | -| ------------------- | -------------------------------------------------------------------------------------------------------------- | --------------------------------- | -| `left/image_rect` | [sensor_msgs/Image](https://github.com/ros2/common_interfaces/blob/humble/sensor_msgs/msg/Image.msg) | The left image of a stereo pair. | -| `right/image_rect` | [sensor_msgs/Image](https://github.com/ros2/common_interfaces/blob/humble/sensor_msgs/msg/Image.msg) | The right image of a stereo pair. | -| `left/camera_info` | [sensor_msgs/CameraInfo](https://github.com/ros2/common_interfaces/blob/humble/sensor_msgs/msg/CameraInfo.msg) | The left camera model. | -| `right/camera_info` | [sensor_msgs/CameraInfo](https://github.com/ros2/common_interfaces/blob/humble/sensor_msgs/msg/CameraInfo.msg) | The right camera model. | -> Note: The images on input topics (`left/image_rect` and `right/image_rect`) should be a color image either in `rgb8` or `bgr8` format. - -#### ROS Topics Published - -| ROS Topic | Interface | Description | -| ----------- | ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------- | -| `disparity` | [stereo_msgs/DisparityImage](https://github.com/ros2/common_interfaces/blob/humble/stereo_msgs/msg/DisparityImage.msg) | The continuous stereo disparity estimation. | - -#### Input Restrictions - -1. The input left and right images must have the **same dimension and resolution**, and the resolution must be **no larger than `1920x1200`**. - -2. Each input pair (`left/image_rect`, `right/image_rect`, `left/camera_info` and `right/camera_info`) should have the **same timestamp**; otherwise, the synchronizing module inside the ESS Disparity Node will drop the input with smaller timestamps. - -#### Output Interpretations - -1. The `isaas_ros_ess` package outputs a disparity image with the same resolution as the input stereo pairs. The input images are rescaled to the ESS model input size before inferencing and the output prediction is rescaled back before publishing. To alter this behavior, use input images with the model-native resolution: `W=960, H=576`. - -2. The left and right `CameraInfo` are used to composite a `stereo_msgs/DisparityImage`. If you only care about the disparity image, and don't need the baseline and focal length information, you can pass dummy camera messages. - -## Troubleshooting - -### Isaac ROS Troubleshooting - -For solutions to problems with Isaac ROS, check [here](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_common/blob/main/docs/troubleshooting.md). - -### Deep Learning Troubleshooting - -For solutions to problems using DNN models, check [here](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_dnn_inference/blob/main/docs/troubleshooting.md). - -### Package not found while launching the visualizer script - -#### Symptom - -```bash -$ ros2 run isaac_ros_ess isaac_ros_ess_visualizer.py -Package 'isaac_ros_ess' not found -``` - -#### Solution - -Use the `colcon build --packages-up-to isaac_ros_ess` command to build `isaac_ros_ess`; do not use the `--symlink-install` option. Run `source install/setup.bash` after the build. 
- -### Problem reserving CacheChange in reader +--- -#### Symptom +## Documentation -When using a ROS bag as input, `isaac_ros_ess` throws an error if the input topics are published too fast: +Please visit the [Isaac ROS Documentation](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_dnn_stereo_depth/index.html) to learn how to use this repository. -```bash -[component_container-1] 2022-06-24 09:04:43.584 [RTPS_MSG_IN Error] (ID:281473268431152) Problem reserving CacheChange in reader: 01.0f.cd.10.ab.f2.65.b6.01.00.00.00|0.0.20.4 -> Function processDataMsg -``` +--- -#### Solution +## Packages -Make sure that the ROS bag has a reasonable size and publish rate. +* [`isaac_ros_ess`](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_dnn_stereo_depth/isaac_ros_ess/index.html) + * [Quickstart](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_dnn_stereo_depth/isaac_ros_ess/index.html#quickstart) + * [Try More Examples](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_dnn_stereo_depth/isaac_ros_ess/index.html#try-more-examples) + * [Troubleshooting](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_dnn_stereo_depth/isaac_ros_ess/index.html#troubleshooting) + * [API](https://nvidia-isaac-ros.github.io/repositories_and_packages/isaac_ros_dnn_stereo_depth/isaac_ros_ess/index.html#api) -## Updates +## Latest -| Date | Changes | -| ---------- | ------------------------------------------ | -| 2023-05-25 | Upgraded model (1.1.0) | -| 2023-04-05 | Source available GXF extensions | -| 2022-10-19 | Updated OSS licensing | -| 2022-08-31 | Update to be compatible with JetPack 5.0.2 | -| 2022-06-30 | Initial release | +Update 2023-10-18: Updated for ESS 3.0 with confidence thresholding in multiple resolutions. diff --git a/docs/tutorial-ess-realsense.md b/docs/tutorial-ess-realsense.md deleted file mode 100644 index cf6afbf..0000000 --- a/docs/tutorial-ess-realsense.md +++ /dev/null @@ -1,34 +0,0 @@ -# Tutorial for DNN Stereo Depth Estimation using a RealSense camera - -
- -## Overview - -This tutorial demonstrates how to perform stereo-camera-based reconstruction using a [RealSense](https://www.intel.com/content/www/us/en/architecture-and-technology/realsense-overview.html) camera and [isaac_ros_ess](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_dnn_stereo_disparity). - -> **Note**: This tutorial requires a compatible RealSense camera from the list available [here](https://github.com/NVIDIA-ISAAC-ROS/.github/blob/main/profile/realsense-setup.md#camera-compatibility). - -## Tutorial Walkthrough - -1. Complete the [realsense setup tutorial](https://github.com/NVIDIA-ISAAC-ROS/.github/blob/main/profile/realsense-setup.md). -2. Complete steps 1-2 described in the [Quickstart Guide](../README.md#quickstart). -3. Open a new terminal and launch the Docker container using the `run_dev.sh` script: - - ```bash - cd ~/workspaces/isaac_ros-dev/src/isaac_ros_common && \ - ./scripts/run_dev.sh - ``` - -4. Build and source the workspace: - - ```bash - cd /workspaces/isaac_ros-dev && \ - colcon build --symlink-install && \ - source install/setup.bash - ``` - -5. Run the launch file, which launches the example and waits for 10 seconds: - - ```bash - ros2 launch isaac_ros_ess isaac_ros_ess_realsense.launch.py engine_file_path:=/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity/resources/ess.engine - ``` diff --git a/docs/tutorial-isaac-sim.md b/docs/tutorial-isaac-sim.md deleted file mode 100644 index 675bc76..0000000 --- a/docs/tutorial-isaac-sim.md +++ /dev/null @@ -1,47 +0,0 @@ -# Tutorial for DNN Stereo Depth Estimation with Isaac Sim - -
- -## Overview - -This tutorial walks you through a graph to [estimate depth](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_dnn_stereo_disparity) with stereo images from Isaac Sim. - -Last validated with [Isaac Sim 2022.2.1](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/release_notes.html#id1) - -## Tutorial Walkthrough - -1. Complete steps 1-7 listed in the [Quickstart section](../README.md#quickstart) of the main README. -2. Install and launch Isaac Sim following the steps in the [Isaac ROS Isaac Sim Setup Guide](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_common/blob/main/docs/isaac-sim-sil-setup.md) -3. Open the Isaac ROS Common USD scene (using the *Content* tab) located at: - - ```text - http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Isaac/2022.2.1/Isaac/Samples/ROS2/Scenario/carter_warehouse_apriltags_worker.usd - ``` - - Wait for the scene to load completely. -4. Go to the *Stage* tab and select `/World/Carter_ROS/ROS_Cameras/ros2_create_camera_right_info`, then in *Property* tab *-> OmniGraph Node -> Inputs -> stereoOffset X* change `0` to `-175.92`. -
-5. Enable the right camera for a stereo image pair. Go to the *Stage* tab and select `/World/Carter_ROS/ROS_Cameras/enable_camera_right`, then tick the *Condition* checkbox. -
-6. Press **Play** to start publishing data from the Isaac Sim application. -
-7. Open a second terminal and attach to the container: - - ```bash - cd ~/workspaces/isaac_ros-dev/src/isaac_ros_common && \ - ./scripts/run_dev.sh - ``` - -8. In the second terminal, start the `isaac_ros_ess` node using the launch files: - - ```bash - ros2 launch isaac_ros_ess isaac_ros_ess_isaac_sim.launch.py engine_file_path:=/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity/resources/ess.engine - ``` - -9. Optionally, you can run the visualizer script to visualize the disparity image. - - ```bash - ros2 run isaac_ros_ess isaac_ros_ess_visualizer.py - ``` - -
diff --git a/docs/tutorial-nitros-graph.md b/docs/tutorial-nitros-graph.md deleted file mode 100644 index 967c5a2..0000000 --- a/docs/tutorial-nitros-graph.md +++ /dev/null @@ -1,45 +0,0 @@ -# Tutorial to Run NITROS-Accelerated Graph with Argus Camera - -```mermaid -graph LR; - argus_node("ArgusStereoNode (Raw Image)") --> left_rectify_node("RectifyNode (Rectified Image)"); - argus_node --> right_rectify_node("RectifyNode (Rectified Image)"); - left_rectify_node --> ess_node("ESSDisparityNode (DNN Inference)"); - right_rectify_node --> ess_node; - ess_node --> point_cloud_point("PointCloudNode (Point Cloud Output)"); -``` - -If you have an [Argus-compatible camera](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_argus_camera), you can also use the launch file provided in this package to start a fully NITROS-accelerated stereo disparity graph. - -To start the graph, follow the steps below: - -1. Follow the [Quickstart section](../README.md#quickstart) up to step 6 in the main README. - -2. Outside the container, clone an additional repository required to run Argus-compatible camera under `~/workspaces/isaac_ros-dev/src`. - - ```bash - cd ~/workspaces/isaac_ros-dev/src - ``` - - ```bash - git clone https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_argus_camera - ``` - -3. Inside the container, build and source the workspace: - - ```bash - cd /workspaces/isaac_ros-dev && \ - colcon build && \ - source install/setup.bash - ``` - -4. (Optional) Run tests to verify complete and correct installation: - - ```bash - colcon test --executor sequential - ``` - -5. Launch ESS Disparity Node: - - ```bash - ros2 launch isaac_ros_ess isaac_ros_argus_ess.launch.py engine_file_path:=/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity/resources/ess.engine diff --git a/docs/visualize-image.md b/docs/visualize-image.md deleted file mode 100644 index 25f3f4c..0000000 --- a/docs/visualize-image.md +++ /dev/null @@ -1,37 +0,0 @@ -# Instructions to Generate Disparity Maps for Stereo Images - -These are instructions for generating a disparity map for a given stereo image pair. - -In addition to supporting the ROS Bag input type, the `isaac_ros_ess_visualizer.py` script also supports raw input images and camera info files. Follow the steps to generate a disparity estimation from raw inputs: - -1. Complete the [Quickstart](../README.md#quickstart) guide first. - -2. Pull the example data: - - ```bash - cd ~/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity && \ - git lfs pull -X "" -I "resources/examples" - ``` - -3. Launch the ESS Disparity Node: - - ```bash - ros2 launch isaac_ros_ess isaac_ros_ess.launch.py engine_file_path:=/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity/resources/ess.engine - ``` - -4. Visualize and validate the output of the package: - - ```bash - ros2 run isaac_ros_ess isaac_ros_ess_visualizer.py --raw_inputs - ``` - -
- -5. Try your own examples: - - ```bash - ros2 run isaac_ros_ess isaac_ros_ess_visualizer.py --raw_inputs \ - --left_image_path '' \ - --right_image_path '' \ - --camera_info_path '' - ``` diff --git a/isaac_ros_ess/CMakeLists.txt b/isaac_ros_ess/CMakeLists.txt index 1ab88da..126790a 100644 --- a/isaac_ros_ess/CMakeLists.txt +++ b/isaac_ros_ess/CMakeLists.txt @@ -15,7 +15,7 @@ # # SPDX-License-Identifier: Apache-2.0 -cmake_minimum_required(VERSION 3.23.2) +cmake_minimum_required(VERSION 3.22.1) project(isaac_ros_ess LANGUAGES C CXX) if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") @@ -28,13 +28,15 @@ ament_auto_find_build_dependencies() # isaac_ros_ess_node ament_auto_add_library(isaac_ros_ess_node SHARED src/ess_disparity_node.cpp) target_link_libraries(isaac_ros_ess_node) -rclcpp_components_register_nodes(isaac_ros_ess_node "nvidia::isaac_ros::dnn_stereo_disparity::ESSDisparityNode") -set(node_plugins "${node_plugins}nvidia::isaac_ros::dnn_stereo_disparity::ESSDisparityNode;$\n") +rclcpp_components_register_nodes(isaac_ros_ess_node "nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode") +set(node_plugins "${node_plugins}nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode;$\n") ### Install ESS extension built from source add_subdirectory(gxf/ess) install(TARGETS gxf_cvcore_ess DESTINATION share/${PROJECT_NAME}/gxf/lib/ess) +add_subdirectory(gxf/thresholder) +install(TARGETS gxf_thresholder DESTINATION share/${PROJECT_NAME}/gxf/lib/thresholder) ### End extensions @@ -45,8 +47,15 @@ if(BUILD_TESTING) find_package(ament_lint_auto REQUIRED) ament_lint_auto_find_test_dependencies() + + # The FindPythonInterp and FindPythonLibs modules are removed + if(POLICY CMP0148) + cmake_policy(SET CMP0148 OLD) + endif() + find_package(launch_testing_ament_cmake REQUIRED) add_launch_test(test/isaac_ros_ess_test.py TIMEOUT "300") + add_launch_test(test/isaac_ros_ess_test_1_16HD_model.py TIMEOUT "300") endif() ament_auto_package(INSTALL_TO_SHARE launch config) diff --git a/isaac_ros_ess/config/ess_inference.yaml b/isaac_ros_ess/config/ess_inference.yaml index 9c27871..1fe00f1 100644 --- a/isaac_ros_ess/config/ess_inference.yaml +++ b/isaac_ros_ess/config/ess_inference.yaml @@ -129,7 +129,7 @@ components: receiver: data_receiver_right min_size: 1 - name: adapter - type: nvidia::cvcore::tensor_ops::ImageAdapter + type: nvidia::isaac::tensor_ops::ImageAdapter parameters: message_type: VideoBuffer - name: data_transmitter @@ -137,6 +137,15 @@ components: parameters: capacity: 12 policy: 0 +- name: confidence_output + type: nvidia::gxf::DoubleBufferTransmitter + parameters: + capacity: 12 + policy: 0 +- type: nvidia::gxf::DownstreamReceptiveSchedulingTerm + parameters: + transmitter: confidence_output + min_size: 1 - type: nvidia::gxf::DownstreamReceptiveSchedulingTerm parameters: transmitter: data_transmitter @@ -147,27 +156,73 @@ components: storage_type: 1 block_size: 18432000 num_blocks: 40 -- type: nvidia::cvcore::ESS +- type: nvidia::isaac::ESSInference parameters: - output_name: disparity + output_name: frame pool: pool left_image_receiver: data_receiver_left right_image_receiver: data_receiver_right output_transmitter: data_transmitter + confidence_transmitter: confidence_output output_adapter: adapter image_type: RGB_U8 pixel_mean: [-128, -128, -128] normalization: [0.00392156862, 0.00392156862, 0.00392156862] standard_deviation: [0.5, 0.5, 0.5] max_batch_size: 1 - input_layer_width: 960 - input_layer_height: 576 model_input_type: RGB_U8 + onnx_file_path: 
onnx_file_path_placeholder engine_file_path: engine_file_path_placeholder input_layers_name: [input_left, input_right] - output_layers_name: [output_left] + output_layers_name: [output_left, output_conf] preprocess_type: RESIZE --- +name: disparity_thresholder +components: +- name: image_input + type: nvidia::gxf::DoubleBufferReceiver + parameters: + capacity: 12 + policy: 0 +- type: nvidia::gxf::MessageAvailableSchedulingTerm + parameters: + receiver: image_input + min_size: 1 +- name: confidence_input + type: nvidia::gxf::DoubleBufferReceiver + parameters: + capacity: 12 + policy: 0 +- type: nvidia::gxf::MessageAvailableSchedulingTerm + parameters: + receiver: confidence_input + min_size: 1 +- name: masked_output + type: nvidia::gxf::DoubleBufferTransmitter + parameters: + capacity: 12 + policy: 0 +- type: nvidia::gxf::DownstreamReceptiveSchedulingTerm + parameters: + transmitter: masked_output + min_size: 1 +- name: allocator + type: nvidia::gxf::BlockMemoryPool + parameters: + storage_type: 1 + block_size: 9216000 + num_blocks: 40 +- name: image_thresholder + type: nvidia::isaac::VideoBufferThresholder + parameters: + image_input: image_input + video_buffer_name: frame + mask_input: confidence_input + masked_output: masked_output + allocator: allocator + threshold: 0.9 + fill_value_float: -1.0 +--- name: disparity_compositor components: - name: disparity_in @@ -212,7 +267,7 @@ components: storage_type: 1 block_size: 18432000 num_blocks: 40 -- type: nvidia::isaac_ros::DisparityCompositor +- type: nvidia::isaac::DisparityCompositor parameters: left_camera_model_receiver: left_cam_receiver right_camera_model_receiver: right_cam_receiver @@ -258,6 +313,14 @@ components: - type: nvidia::gxf::Connection parameters: source: ess/data_transmitter + target: disparity_thresholder/image_input +- type: nvidia::gxf::Connection + parameters: + source: ess/confidence_output + target: disparity_thresholder/confidence_input +- type: nvidia::gxf::Connection + parameters: + source: disparity_thresholder/masked_output target: disparity_compositor/disparity_in - type: nvidia::gxf::Connection parameters: diff --git a/isaac_ros_ess/config/isaac_ros_ess_isaac_sim.rviz b/isaac_ros_ess/config/isaac_ros_ess_isaac_sim.rviz index a4a4e69..69d2995 100644 --- a/isaac_ros_ess/config/isaac_ros_ess_isaac_sim.rviz +++ b/isaac_ros_ess/config/isaac_ros_ess_isaac_sim.rviz @@ -7,7 +7,7 @@ Panels: - /Global Options1 - /Status1 Splitter Ratio: 0.5 - Tree Height: 271 + Tree Height: 335 - Class: rviz_common/Selection Name: Selection - Class: rviz_common/Tool Properties @@ -93,7 +93,7 @@ Visualization Manager: Durability Policy: Volatile History Policy: Keep Last Reliability Policy: Reliable - Value: /rgb_left + Value: /front_stereo_camera/left_rgb/image_resize Value: true - Class: rviz_default_plugins/Image Enabled: true @@ -107,7 +107,7 @@ Visualization Manager: Durability Policy: Volatile History Policy: Keep Last Reliability Policy: Reliable - Value: /rgb_right + Value: /front_stereo_camera/left_rgb/image_resize Value: true Enabled: true Global Options: @@ -155,35 +155,35 @@ Visualization Manager: Views: Current: Class: rviz_default_plugins/Orbit - Distance: 0.9596342444419861 + Distance: 4.150443077087402 Enable Stereo Rendering: Stereo Eye Separation: 0.05999999865889549 Stereo Focal Distance: 1 Swap Stereo Eyes: false Value: false Focal Point: - X: 0.10659492015838623 - Y: 0.048834092915058136 - Z: 0.3884413540363312 + X: 0.616879940032959 + Y: -0.006036696955561638 + Z: 1.438449501991272 Focal Shape Fixed Size: true Focal 
Shape Size: 0.05000000074505806 Invert Z Axis: false Name: Current View Near Clip Distance: 0.009999999776482582 - Pitch: 0.030397988855838776 + Pitch: -0.09460201114416122 Target Frame: Value: Orbit (rviz) - Yaw: 3.1303980350494385 + Yaw: 3.11539888381958 Saved: ~ Window Geometry: Displays: collapsed: false - Height: 945 + Height: 1066 Hide Left Dock: false Hide Right Dock: false Left Image: collapsed: false - QMainWindow State: 000000ff00000000fd00000004000000000000015600000317fc020000000afb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b00000198000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb00000014004c00650066007400200049006d00610067006501000001d9000000ba0000002800fffffffb000000160052006900670068007400200049006d0061006700650100000299000000b90000002800ffffff000000010000010f00000317fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003b00000317000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000005790000003efc0100000002fb0000000800540069006d00650100000000000005790000024400fffffffb0000000800540069006d00650100000000000004500000000000000000000003080000031700000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + QMainWindow State: 
000000ff00000000fd0000000400000000000001b300000390fc020000000afb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000001d8000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb00000014004c00650066007400200049006d0061006700650100000219000000d90000002800fffffffb000000160052006900670068007400200049006d00610067006501000002f8000000d30000002800ffffff000000010000010000000390fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003b00000390000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007800000003efc0100000002fb0000000800540069006d00650100000000000007800000024400fffffffb0000000800540069006d00650100000000000004500000000000000000000004c10000039000000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 Right Image: collapsed: false Selection: @@ -194,6 +194,6 @@ Window Geometry: collapsed: false Views: collapsed: false - Width: 1401 - X: 1285 - Y: 238 + Width: 1920 + X: 0 + Y: 267 diff --git a/isaac_ros_ess/config/isaac_ros_ess_zed.rviz b/isaac_ros_ess/config/isaac_ros_ess_zed.rviz new file mode 100644 index 0000000..7c5e75c --- /dev/null +++ b/isaac_ros_ess/config/isaac_ros_ess_zed.rviz @@ -0,0 +1,197 @@ +Panels: + - Class: rviz_common/Displays + Help Height: 138 + Name: Displays + Property Tree Widget: + Expanded: + - /Global Options1 + - /Status1 + Splitter Ratio: 0.5 + Tree Height: 480 + - Class: rviz_common/Selection + Name: Selection + - Class: rviz_common/Tool Properties + Expanded: + - /2D Goal Pose1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.5886790156364441 + - Class: rviz_common/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz_common/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: PointCloud2 +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz_default_plugins/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.029999999329447746 + Value: Lines + Name: Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 10 + Reference Frame: + Value: true + - Class: rviz_default_plugins/Image + Enabled: true + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Image + Normalize Range: true + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /zed_node/left/image_rect_color_rgb + Value: true + - Class: rviz_default_plugins/Image + Enabled: true + Max Value: 1 + Median window: 5 + Min Value: 0 + Name: Image + Normalize Range: true + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: 
/zed_node/right/image_rect_color_rgb + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz_default_plugins/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Selectable: true + Size (Pixels): 3 + Size (m): 0.009999999776482582 + Style: Flat Squares + Topic: + Depth: 5 + Durability Policy: Volatile + Filter size: 10 + History Policy: Keep Last + Reliability Policy: Reliable + Value: /points2 + Use Fixed Frame: true + Use rainbow: true + Value: true + Enabled: true + Global Options: + Background Color: 48; 48; 48 + Fixed Frame: zed2i_camera_center + Frame Rate: 30 + Name: root + Tools: + - Class: rviz_default_plugins/Interact + Hide Inactive Objects: true + - Class: rviz_default_plugins/MoveCamera + - Class: rviz_default_plugins/Select + - Class: rviz_default_plugins/FocusCamera + - Class: rviz_default_plugins/Measure + Line color: 128; 128; 0 + - Class: rviz_default_plugins/SetInitialPose + Covariance x: 0.25 + Covariance y: 0.25 + Covariance yaw: 0.06853891909122467 + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /initialpose + - Class: rviz_default_plugins/SetGoal + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /goal_pose + - Class: rviz_default_plugins/PublishPoint + Single click: true + Topic: + Depth: 5 + Durability Policy: Volatile + History Policy: Keep Last + Reliability Policy: Reliable + Value: /clicked_point + Transformation: + Current: + Class: rviz_default_plugins/TF + Value: true + Views: + Current: + Class: rviz_default_plugins/Orbit + Distance: 5.037819862365723 + Enable Stereo Rendering: + Stereo Eye Separation: 0.05999999865889549 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Focal Point: + X: 0 + Y: 0 + Z: 0 + Focal Shape Fixed Size: true + Focal Shape Size: 0.05000000074505806 + Invert Z Axis: false + Name: Current View + Near Clip Distance: 0.009999999776482582 + Pitch: 0.29539817571640015 + Target Frame: + Value: Orbit (rviz) + Yaw: 2.8435826301574707 + Saved: ~ +Window Geometry: + Displays: + collapsed: false + Height: 2272 + Hide Left Dock: false + Hide Right Dock: false + Image: + collapsed: false + QMainWindow State: 
000000ff00000000fd0000000400000000000005a8000007f1fc020000000afb0000001200530065006c0065006300740069006f006e00000001e10000009b000000ab00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c0061007900730100000069000002ce0000017800fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d0061006700650100000343000002870000004500fffffffb0000000a0049006d00610067006501000005d6000002840000004500ffffff000000010000014f000007f1fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a005600690065007700730000000069000007f10000012300fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e1000001970000000300000e6c00000051fc0100000002fb0000000800540069006d0065010000000000000e6c0000045300fffffffb0000000800540069006d00650100000000000004500000000000000000000008b8000007f100000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: false + Width: 3692 + X: 148 + Y: 54 diff --git a/isaac_ros_ess/config/realsense.yaml b/isaac_ros_ess/config/realsense.yaml index 372d496..422ff0a 100644 --- a/isaac_ros_ess/config/realsense.yaml +++ b/isaac_ros_ess/config/realsense.yaml @@ -7,7 +7,7 @@ rgb_camera: color_qos: "SENSOR_DATA" depth_module: - profile: '640x480x60' + profile: '1280x720x30' emitter_enabled: 0 emitter_on_off: false depth_qos: "SENSOR_DATA" diff --git a/isaac_ros_ess/config/zed.yaml b/isaac_ros_ess/config/zed.yaml new file mode 100644 index 0000000..d06a05f --- /dev/null +++ b/isaac_ros_ess/config/zed.yaml @@ -0,0 +1,163 @@ +# config/common_yaml +# Common parameters to Stereolabs ZED and ZED mini cameras +# +# Note: the parameter svo_file is passed as exe argumet +--- +/**: + ros__parameters: + + general: + svo_file: "" # usually overwritten by launch file + svo_loop: false # Enable loop mode when using an SVO as input source + svo_realtime: true # if true the SVO will be played trying to respect the original framerate eventually skipping frames, otherwise every frame will be processed respecting the `pub_frame_rate` setting + camera_timeout_sec: 5 + camera_max_reconnect: 5 + camera_flip: false + zed_id: 0 # IMPORTANT: to be used in simulation to distinguish individual cameras im multi-camera configurations - usually overwritten by launch file + serial_number: 0 # usually overwritten by launch file + pub_resolution: 'MEDIUM' # The resolution used for output. 'HD2K', 'HD1080', 'HD1200', 'HD720', 'MEDIUM', 'SVGA', 'VGA', 'LOW' + pub_frame_rate: 15.0 # [DYNAMIC] - frequency of publishing of visual images and depth images + gpu_id: -1 + region_of_interest: '[]' # A polygon defining the ROI where the ZED SDK perform the processing ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent. 
+ #region_of_interest: '[[0.25,0.33],[0.75,0.33],[0.75,0.5],[0.5,0.75],[0.25,0.5]]' # A polygon defining the ROI where the ZED SDK performs the processing, ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent.
+ #region_of_interest: '[[0.25,0.25],[0.75,0.25],[0.75,0.75],[0.25,0.75]]' # A polygon defining the ROI where the ZED SDK performs the processing, ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent.
+ #region_of_interest: '[[0.5,0.25],[0.75,0.5],[0.5,0.75],[0.25,0.5]]' # A polygon defining the ROI where the ZED SDK performs the processing, ignoring the rest. Coordinates must be normalized to '1.0' to be resolution independent.
+ sdk_verbose: 1
+
+ video:
+ brightness: 4 # [DYNAMIC] Not available for ZED X/ZED X Mini
+ contrast: 4 # [DYNAMIC] Not available for ZED X/ZED X Mini
+ hue: 0 # [DYNAMIC] Not available for ZED X/ZED X Mini
+ saturation: 4 # [DYNAMIC]
+ sharpness: 4 # [DYNAMIC]
+ gamma: 8 # [DYNAMIC]
+ auto_exposure_gain: true # [DYNAMIC]
+ exposure: 80 # [DYNAMIC]
+ gain: 80 # [DYNAMIC]
+ auto_whitebalance: true # [DYNAMIC]
+ whitebalance_temperature: 42 # [DYNAMIC] - [28,65] works only if `auto_whitebalance` is false
+ qos_history: 1 # '1': KEEP_LAST - '2': KEEP_ALL
+ qos_depth: 1 # Queue size if using KEEP_LAST
+ qos_reliability: 1 # '1': RELIABLE - '2': BEST_EFFORT -
+ qos_durability: 2 # '1': TRANSIENT_LOCAL - '2': VOLATILE
+
+ depth:
+ depth_mode: 'NONE' # Matches the ZED SDK setting: 'NONE', 'PERFORMANCE', 'QUALITY', 'ULTRA', 'NEURAL' - Note: if 'NONE' all the modules that require depth extraction are disabled by default (Pos. Tracking, Obj. Detection, Mapping, ...)
+ depth_stabilization: 1 # Forces positional tracking to start if greater than 0 - Range: [0,100]
+ openni_depth_mode: false # 'false': 32bit float [meters], 'true': 16bit unsigned int [millimeters]
+ point_cloud_freq: 10.0 # [DYNAMIC] - frequency of the pointcloud publishing (equal to or less than the `grab_frame_rate` value)
+ depth_confidence: 50 # [DYNAMIC]
+ depth_texture_conf: 100 # [DYNAMIC]
+ remove_saturated_areas: true # [DYNAMIC]
+ qos_history: 1 # '1': KEEP_LAST - '2': KEEP_ALL
+ qos_depth: 1 # Queue size if using KEEP_LAST
+ qos_reliability: 1 # '1': RELIABLE - '2': BEST_EFFORT -
+ qos_durability: 2 # '1': TRANSIENT_LOCAL - '2': VOLATILE
+
+ pos_tracking:
+ pos_tracking_enabled: false # True to enable positional tracking from start
+ imu_fusion: true # enable/disable IMU fusion. When set to false, only the optical odometry will be used.
+ publish_tf: true # [usually overwritten by launch file] publish `odom -> base_link` TF
+ publish_map_tf: true # [usually overwritten by launch file] publish `map -> odom` TF
+ publish_imu_tf: true # [usually overwritten by launch file] enable/disable the IMU TF broadcasting
+ base_frame: "base_link" # usually overwritten by launch file
+ map_frame: "map"
+ odometry_frame: "odom"
+ area_memory_db_path: ""
+ area_memory: true # Enable to detect loop closure
+ depth_min_range: 0.0 # Set this value for removing fixed zones of the robot in the FoV of the camera from the visual odometry evaluation
+ set_as_static: false # If 'true' the camera will be static and not move in the environment
+ set_gravity_as_origin: true # If 'true' align the positional tracking world to imu gravity measurement. Keep the yaw from the user initial pose.
+ floor_alignment: false # Enable to automatically calculate camera/floor offset
+ initial_base_pose: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] # Initial position of the `base_frame` in the map -> [X, Y, Z, R, P, Y]
+ init_odom_with_first_valid_pose: true # Enable to initialize the odometry with the first valid pose
+ path_pub_rate: 2.0 # [DYNAMIC] - Camera trajectory publishing frequency
+ path_max_count: -1 # use '-1' for unlimited path size
+ two_d_mode: false # Force navigation on a plane. If true the Z value will be fixed to "fixed_z_value", roll and pitch to zero
+ fixed_z_value: 0.00 # Value to be used for Z coordinate if `two_d_mode` is true
+ transform_time_offset: 0.0 # The value added to the timestamp of `map->odom` and `odom->base_link` transform being generated
+ qos_history: 1 # '1': KEEP_LAST - '2': KEEP_ALL
+ qos_depth: 1 # Queue size if using KEEP_LAST
+ qos_reliability: 1 # '1': RELIABLE - '2': BEST_EFFORT
+ qos_durability: 2 # '1': TRANSIENT_LOCAL - '2': VOLATILE
+
+ gnss_fusion:
+ gnss_fusion_enabled: false # fuse 'sensor_msgs/NavSatFix' message information into pose data
+ gnss_fix_topic: "/gps/fix" # Name of the GNSS topic of type NavSatFix to subscribe to [Default: "/gps/fix"]
+ gnss_init_distance: 5.0 # The minimum distance traveled by the robot required to initialize the GNSS fusion
+ gnss_zero_altitude: false # Set to `true` to ignore GNSS altitude information
+ gnss_frame: "gnss_link" # [usually overwritten by launch file] The TF frame of the GNSS sensor
+ publish_utm_tf: true # Publish `utm` -> `map` TF
+ broadcast_utm_transform_as_parent_frame: false # if 'true' publish `utm` -> `map` TF, otherwise `map` -> `utm`
+
+ mapping:
+ mapping_enabled: false # True to enable mapping and fused point cloud publication
+ resolution: 0.05 # map resolution in meters [min: 0.01f - max: 0.2f]
+ max_mapping_range: 5.0 # maximum depth range while mapping in meters (-1 for automatic calculation) [2.0, 20.0]
+ fused_pointcloud_freq: 1.0 # frequency of the publishing of the fused colored point cloud
+ clicked_point_topic: "/clicked_point" # Topic published by Rviz when a point of the cloud is clicked. Used for plane detection
+ qos_history: 1 # '1': KEEP_LAST - '2': KEEP_ALL
+ qos_depth: 1 # Queue size if using KEEP_LAST
+ qos_reliability: 1 # '1': RELIABLE - '2': BEST_EFFORT -
+ qos_durability: 2 # '1': TRANSIENT_LOCAL - '2': VOLATILE
+
+ sensors:
+ sensors_image_sync: false # Synchronize Sensors messages with latest published video/depth message
+ sensors_pub_rate: 200. # frequency of publishing of sensors data. MAX: 400. - MIN: grab rate
+ qos_history: 1 # '1': KEEP_LAST - '2': KEEP_ALL
+ qos_depth: 1 # Queue size if using KEEP_LAST
+ qos_reliability: 1 # '1': RELIABLE - '2': BEST_EFFORT -
+ qos_durability: 2 # '1': TRANSIENT_LOCAL - '2': VOLATILE
+
+ object_detection:
+ od_enabled: false # True to enable Object Detection
+ model: 'MULTI_CLASS_BOX_MEDIUM' # 'MULTI_CLASS_BOX_FAST', 'MULTI_CLASS_BOX_MEDIUM', 'MULTI_CLASS_BOX_ACCURATE', 'PERSON_HEAD_BOX_FAST', 'PERSON_HEAD_BOX_ACCURATE'
+ allow_reduced_precision_inference: true # Allow inference to run at a lower precision to improve runtime and memory usage
+ max_range: 20.0 # [m] Defines an upper depth range for detections
+ confidence_threshold: 50.0 # [DYNAMIC] - Minimum value of the detection confidence of an object [0,99]
+ prediction_timeout: 0.5 # During this time [sec], the object will have OK state even if it is not detected.
Set this parameter to 0 to disable SDK predictions + filtering_mode: 1 # '0': NONE - '1': NMS3D - '2': NMS3D_PER_CLASS + mc_people: false # [DYNAMIC] - Enable/disable the detection of persons for 'MULTI_CLASS_X' models + mc_vehicle: true # [DYNAMIC] - Enable/disable the detection of vehicles for 'MULTI_CLASS_X' models + mc_bag: true # [DYNAMIC] - Enable/disable the detection of bags for 'MULTI_CLASS_X' models + mc_animal: true # [DYNAMIC] - Enable/disable the detection of animals for 'MULTI_CLASS_X' models + mc_electronics: true # [DYNAMIC] - Enable/disable the detection of electronic devices for 'MULTI_CLASS_X' models + mc_fruit_vegetable: true # [DYNAMIC] - Enable/disable the detection of fruits and vegetables for 'MULTI_CLASS_X' models + mc_sport: true # [DYNAMIC] - Enable/disable the detection of sport-related objects for 'MULTI_CLASS_X' models + qos_history: 1 # '1': KEEP_LAST - '2': KEEP_ALL + qos_depth: 1 # Queue size if using KEEP_LAST + qos_reliability: 1 # '1': RELIABLE - '2': BEST_EFFORT + qos_durability: 2 # '1': TRANSIENT_LOCAL - '2': VOLATILE + + body_tracking: + bt_enabled: false # True to enable Body Tracking + model: 'HUMAN_BODY_MEDIUM' # 'HUMAN_BODY_FAST', 'HUMAN_BODY_MEDIUM', 'HUMAN_BODY_ACCURATE' + body_format: 'BODY_38' # 'BODY_18','BODY_34','BODY_38','BODY_70' + allow_reduced_precision_inference: false # Allow inference to run at a lower precision to improve runtime and memory usage + max_range: 20.0 # [m] Defines a upper depth range for detections + body_kp_selection: 'FULL' # 'FULL', 'UPPER_BODY' + enable_body_fitting: false # Defines if the body fitting will be applied + enable_tracking: true # Defines if the object detection will track objects across images flow + prediction_timeout_s: 0.5 # During this time [sec], the skeleton will have OK state even if it is not detected. Set this parameter to 0 to disable SDK predictions + confidence_threshold: 50.0 # [DYNAMIC] - Minimum value of the detection confidence of skeleton key points [0,99] + minimum_keypoints_threshold: 5 # [DYNAMIC] - Minimum number of skeleton key points to be detected for a valid skeleton + qos_history: 1 # '1': KEEP_LAST - '2': KEEP_ALL + qos_depth: 1 # Queue size if using KEEP_LAST + qos_reliability: 1 # '1': RELIABLE - '2': BEST_EFFORT + qos_durability: 2 # '1': TRANSIENT_LOCAL - '2': VOLATILE + + use_sim_time: false # EXPERIMENTAL (only for development) - Set to true to enable SIMULATION mode # + sim_address: '192.168.1.90' # EXPERIMENTAL (only for development) - The local address of the machine running the simulator + + debug: + debug_common: false + debug_video_depth: false + debug_camera_controls: false + debug_point_cloud: false + debug_positional_tracking: false + debug_gnss: false + debug_sensors: false + debug_mapping : false + debug_terrain_mapping : false + debug_object_detection : false + debug_body_tracking : false \ No newline at end of file diff --git a/isaac_ros_ess/gxf/ess/3dv/include/cv/ess/ESS.h b/isaac_ros_ess/gxf/ess/3dv/include/cv/ess/ESS.h deleted file mode 100644 index 277165d..0000000 --- a/isaac_ros_ess/gxf/ess/3dv/include/cv/ess/ESS.h +++ /dev/null @@ -1,203 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef NV_ESS_H_ -#define NV_ESS_H_ - -#include - -#include - -#include -#include -#include -#include - -namespace cvcore { namespace ess { - -/** - * Describes the algorithm supported for ESS Preprocessing - */ -enum class PreProcessType : uint8_t -{ - RESIZE = 0, // Resize to network dimensions without maintaining aspect ratio - CENTER_CROP // Crop to network dimensions from center of image -}; - -/** - * Describes the parameters for ESS Preprocessing - */ -struct ESSPreProcessorParams -{ - /* Preprocessing steps for ESS */ - PreProcessType preProcessType; -}; - -/** - * Default parameters for the preprocessing pipeline. - */ -CVCORE_API extern const ImagePreProcessingParams defaultPreProcessorParams; - -/** - * Default parameters to describe the input expected for the model. - */ -CVCORE_API extern const ModelInputParams defaultModelInputParams; - -/** - * Default parameters to describe the model inference parameters. - */ -CVCORE_API extern const ModelInferenceParams defaultInferenceParams; - -/** - * Default parameters for the ESS Preprocessing - */ -CVCORE_API extern const ESSPreProcessorParams defaultESSPreProcessorParams; - -/* - * Interface for running pre-processing on ESS model. - */ -class CVCORE_API ESSPreProcessor -{ -public: - /** - * Default constructor is deleted - */ - ESSPreProcessor() = delete; - - /** - * Constructor of ESSPreProcessor. - * @param preProcessorParams image pre-processing parameters. - * @param modelInputParams model paramters for network. - * @param essPreProcessorParams paramaters specific for ess preprocessing. - */ - ESSPreProcessor(const ImagePreProcessingParams &preProcessorParams, const ModelInputParams &modelInputParams, - const ESSPreProcessorParams &essPreProcessorParams); - - /** - * Destructor of ESSPreProcessor. - */ - ~ESSPreProcessor(); - - /** - * Main interface to run pre-processing. - * @param stream cuda stream. - */ - - void execute(Tensor &leftOutput, Tensor &rightOutput, - const Tensor &leftInput, const Tensor &rightInput, - cudaStream_t stream = 0); - -private: - /** - * Implementation of ESSPreProcessor. - */ - struct ESSPreProcessorImpl; - std::unique_ptr m_pImpl; -}; - -/** - * ESS parameters and implementation - */ -class CVCORE_API ESS -{ -public: - /** - * Constructor for ESS. - * @param imgparams image pre-processing parameters. - * @param modelInputParams model parameters for network. - * @param modelInferParams model input inference parameters. - * @param essPreProcessorParams paramaters specific for ess preprocessing. - */ - ESS(const ImagePreProcessingParams &imgparams, const ModelInputParams &modelParams, - const ModelInferenceParams &modelInferParams, const ESSPreProcessorParams &essPreProcessorParams); - - /** - * Default constructor not supported. - */ - ESS() = delete; - - /** - * Destructor for ESS. 
- */ - ~ESS(); - - /** - * Inference function for a given BGR image - * @param disparityMap Disparity map (CPU/GPU tensor supported) - * @param leftInput RGB/BGR Interleaved Left image (Only GPU Input Tensor - * supported) - * @param rightInput RGB/BGR Interleaved Right image (Only GPU Input Tensor - * supported) - * @param stream Cuda stream - */ - void execute(Tensor &disparityMap, const Tensor &leftInput, - const Tensor &rightInput, cudaStream_t stream = 0); - - /** - * Inference function for a given Preprocessed image - * @param disparityMap Disparity map (CPU/GPU tensor supported) - * @param leftInput RGB Planar Left image resized to network input dimensions (Only GPU Input Tensor - * supported) - * @param rightInput RGB Planar Right image resized to network input dimensions (Only GPU Input Tensor - * supported) - * @param stream Cuda stream - */ - void execute(Tensor &disparityMap, const Tensor &leftInput, - const Tensor &rightInput, cudaStream_t stream = 0); - -private: - struct ESSImpl; - std::unique_ptr m_pImpl; -}; - -/** - * ESS parameters and implementation - */ -class CVCORE_API ESSPostProcessor -{ -public: - /** - * Constructor for ESS. - * @param modelInputParams model parameters for network. - */ - ESSPostProcessor(const ModelInputParams &modelParams); - /** - * Default constructor not supported. - */ - ESSPostProcessor() = delete; - - /** - * Destructor for ESS. - */ - ~ESSPostProcessor(); - - /** - * Inference function for a given BGR image - * @param outputdisparityMap Disparity map rescaled to orginal resolution (CPU/GPU tensor) - * @param inputDisparityMap input Disparity map (GPU tensor) - * @param stream Cuda stream - */ - void execute(Tensor &outputdisparityMap, const Tensor &inputdisparityMap, - cudaStream_t stream = 0); - -private: - struct ESSPostProcessorImpl; - std::unique_ptr m_pImpl; -}; - -}} // namespace cvcore::ess -#endif diff --git a/isaac_ros_ess/gxf/ess/3dv/src/ESS.cpp b/isaac_ros_ess/gxf/ess/3dv/src/ESS.cpp deleted file mode 100644 index 151545c..0000000 --- a/isaac_ros_ess/gxf/ess/3dv/src/ESS.cpp +++ /dev/null @@ -1,230 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -#include - -#include - -#ifdef NVBENCH_ENABLE -#include -#endif - -#include -#include -#include -#include -#include - -#include - -namespace cvcore { namespace ess { - -/* Default parameters used for the model provided*/ -const ImagePreProcessingParams defaultPreProcessorParams = { - BGR_U8, /**< Input type of image.*/ - {-128, -128, -128}, /**< Mean value */ - {1.0 / 255.0, 1.0 / 255.0, 1.0 / 255.0}, /**< Normalization factor */ - {0.5, 0.5, 0.5}}; /**< Standard deviation */ - -const ModelInputParams defaultModelInputParams = {1, /**< Max batch size supported */ - 960, /**< Input layer width of the network */ - 576, /**< Input layer height of the network */ - RGB_U8}; /**< Input image type the network is trained with */ - -const ModelInferenceParams defaultInferenceParams = { - "models/ess.engine", /**< Engine file path */ - {"input_left", "input_right"}, /**< Input layer name of the model */ - {"output_left"}}; /**< Output layer name of the network */ - -const ESSPreProcessorParams defaultESSPreProcessorParams = { - PreProcessType::RESIZE}; // Resize the input image to the network input dimensions - -struct ESS::ESSImpl -{ - inferencer::TensorRTInferenceParams tensorrtParams; - inferencer::InferenceBackendClient client; - - // Model includes 2 input layers and 1 output layer - Tensor m_outputDevice; - Tensor m_inputLeftPlanar; - Tensor m_inputRightPlanar; - - // Preprocess and PostProcess Objects - std::unique_ptr m_preprocess; - std::unique_ptr m_postprocess; - - // Max batch Size supported - size_t m_maxBatchSize; - - std::string m_leftInputLayerName, m_rightInputLayerName; - - size_t m_networkInputWidth, m_networkInputHeight; - - ESSImpl(const ImagePreProcessingParams &imgParams, const ModelInputParams &modelParams, - const ModelInferenceParams &modelInferParams, const ESSPreProcessorParams &essParams) - : m_maxBatchSize(modelParams.maxBatchSize) - { - if (modelInferParams.inputLayers.size() != 2 || modelInferParams.outputLayers.size() != 1 || - modelParams.maxBatchSize <= 0) - { - throw ErrorCode::INVALID_ARGUMENT; - } - - // Initialize Preprocessor and postprocessor - m_preprocess.reset(new ESSPreProcessor(imgParams, modelParams, essParams)); - m_postprocess.reset(new ESSPostProcessor(modelParams)); - - // Initialize TRT backend - tensorrtParams = {inferencer::TRTInferenceType::TRT_ENGINE, - nullptr, - modelInferParams.engineFilePath, - modelParams.maxBatchSize, - modelInferParams.inputLayers, - modelInferParams.outputLayers}; - - std::error_code err = - inferencer::InferenceBackendFactory::CreateTensorRTInferenceBackendClient(client, tensorrtParams); - - if (err != cvcore::make_error_code(cvcore::ErrorCode::SUCCESS)) - { - throw err; - } - - inferencer::ModelMetaData modelInfo = client->getModelMetaData(); - - m_networkInputHeight = modelParams.inputLayerHeight; - m_networkInputWidth = modelParams.inputLayerWidth; - m_inputLeftPlanar = {m_networkInputWidth, m_networkInputHeight, modelParams.maxBatchSize, false}; - m_inputRightPlanar = {m_networkInputWidth, m_networkInputHeight, modelParams.maxBatchSize, false}; - size_t outputWidth = modelInfo.outputLayers[modelInferParams.outputLayers[0]].shape[2]; - size_t outputHeight = modelInfo.outputLayers[modelInferParams.outputLayers[0]].shape[1]; - m_outputDevice = {outputWidth, outputHeight, modelParams.maxBatchSize, false}; - m_leftInputLayerName = modelInferParams.inputLayers[0]; - m_rightInputLayerName = modelInferParams.inputLayers[1]; - 
CHECK_ERROR_CODE(client->setInput(m_inputLeftPlanar, modelInferParams.inputLayers[0])); - CHECK_ERROR_CODE(client->setInput(m_inputRightPlanar, modelInferParams.inputLayers[1])); - CHECK_ERROR_CODE(client->setOutput(m_outputDevice, modelInferParams.outputLayers[0])); - } - - ~ESSImpl() - { - CHECK_ERROR_CODE(client->unregister()); - inferencer::InferenceBackendFactory::DestroyTensorRTInferenceBackendClient(client); - } - - void execute(Tensor &output, const Tensor &inputLeft, - const Tensor &inputRight, cudaStream_t stream) - { - size_t batchSize = inputLeft.getDepth(); - if (inputLeft.isCPU() || inputRight.isCPU()) - { - throw std::invalid_argument("ESS : Input type should be GPU buffer"); - } - - if (inputLeft.getDepth() > m_maxBatchSize || inputRight.getDepth() > m_maxBatchSize) - { - throw std::invalid_argument("ESS : Input batch size cannot exceed max batch size\n"); - } - - if (inputLeft.getDepth() != inputRight.getDepth() || output.getDepth() != inputLeft.getDepth()) - { - throw std::invalid_argument("ESS : Batch size of input and output images don't match!\n"); - } - m_preprocess->execute(m_inputLeftPlanar, m_inputRightPlanar, inputLeft, inputRight, stream); - -#ifdef NVBENCH_ENABLE - size_t inputWidth = inputLeft.getWidth(); - size_t inputHeight = inputLeft.getHeight(); - const std::string testName = "ESSInference_batch" + std::to_string(batchSize) + "_" + - std::to_string(inputWidth) + "x" + std::to_string(inputHeight) + "x" + - std::to_string(inputLeft.getChannelCount()) + "_GPU"; - nv::bench::Timer timerFunc = nv::bench::GPU(testName.c_str(), nv::bench::Flag::DEFAULT, stream); -#endif - - CHECK_ERROR_CODE(client->setCudaStream(stream)); - CHECK_ERROR_CODE(client->infer(batchSize)); - // PostProcess - m_postprocess->execute(output, m_outputDevice, stream); - } - - void execute(Tensor &output, const Tensor &inputLeft, - const Tensor &inputRight, cudaStream_t stream) - { - size_t batchSize = inputLeft.getDepth(); - if (inputLeft.isCPU() || inputRight.isCPU()) - { - throw std::invalid_argument("ESS : Input type should be GPU buffer"); - } - - if (inputLeft.getDepth() > m_maxBatchSize || inputRight.getDepth() > m_maxBatchSize) - { - throw std::invalid_argument("ESS : Input batch size cannot exceed max batch size\n"); - } - - if (inputLeft.getDepth() != inputRight.getDepth() || output.getDepth() != inputLeft.getDepth()) - { - throw std::invalid_argument("ESS : Batch size of input and output images don't match!\n"); - } - - if (inputLeft.getWidth() != m_networkInputWidth || inputLeft.getHeight() != m_networkInputHeight) - { - throw std::invalid_argument("ESS : Left preprocessed input does not match network input dimensions!\n"); - } - - if (inputRight.getWidth() != m_networkInputWidth || inputRight.getHeight() != m_networkInputHeight) - { - throw std::invalid_argument("ESS : Right preprocessed input does not match network input dimensions!\n"); - } -#ifdef NVBENCH_ENABLE - size_t inputWidth = inputLeft.getWidth(); - size_t inputHeight = inputLeft.getHeight(); - const std::string testName = "ESSInference_batch" + std::to_string(batchSize) + "_" + - std::to_string(inputWidth) + "x" + std::to_string(inputHeight) + "x" + - std::to_string(inputLeft.getChannelCount()) + "_GPU"; - nv::bench::Timer timerFunc = nv::bench::GPU(testName.c_str(), nv::bench::Flag::DEFAULT, stream); -#endif - // inference - CHECK_ERROR_CODE(client->setInput(inputLeft, m_leftInputLayerName)); - CHECK_ERROR_CODE(client->setInput(inputRight, m_rightInputLayerName)); - - CHECK_ERROR_CODE(client->setCudaStream(stream)); 
- CHECK_ERROR_CODE(client->infer(batchSize)); - // PostProcess - m_postprocess->execute(output, m_outputDevice, stream); - } -}; - -ESS::ESS(const ImagePreProcessingParams &imgParams, const ModelInputParams &modelParams, - const ModelInferenceParams &modelInferParams, const ESSPreProcessorParams &essParams) - : m_pImpl(new ESSImpl(imgParams, modelParams, modelInferParams, essParams)) -{ -} - -ESS::~ESS() {} - -void ESS::execute(Tensor &output, const Tensor &inputLeft, - const Tensor &inputRight, cudaStream_t stream) -{ - m_pImpl->execute(output, inputLeft, inputRight, stream); -} - -void ESS::execute(Tensor &output, const Tensor &inputLeft, - const Tensor &inputRight, cudaStream_t stream) -{ - m_pImpl->execute(output, inputLeft, inputRight, stream); -} -}} // namespace cvcore::ess diff --git a/isaac_ros_ess/gxf/ess/3dv/src/ESSPostProcess.cpp b/isaac_ros_ess/gxf/ess/3dv/src/ESSPostProcess.cpp deleted file mode 100644 index ed823a0..0000000 --- a/isaac_ros_ess/gxf/ess/3dv/src/ESSPostProcess.cpp +++ /dev/null @@ -1,129 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include - -#include - -#ifdef NVBENCH_ENABLE -#include -#endif - -#include -#include -#include -#include - -namespace cvcore { namespace ess { - -struct ESSPostProcessor::ESSPostProcessorImpl -{ - ESSPostProcessorImpl(const ModelInputParams &modelParams) - : m_maxBatchSize(modelParams.maxBatchSize) - , m_networkWidth(modelParams.inputLayerWidth) - , m_networkHeight(modelParams.inputLayerHeight) - { - m_scaledDisparityDevice = {m_networkWidth, m_networkHeight, m_maxBatchSize, false}; - m_outputDisparityDevice = {m_networkWidth, m_networkHeight, m_maxBatchSize, false}; - } - - void resizeBuffers(std::size_t width, std::size_t height) - { - if (m_outputDisparityDevice.getWidth() == width && m_outputDisparityDevice.getHeight() == height) - { - return; - } - m_outputDisparityDevice = {width, height, m_maxBatchSize, false}; - } - - void process(Tensor &outputDisparity, const Tensor &inputDisparity, - cudaStream_t stream) - { - if (inputDisparity.isCPU()) - { - throw std::invalid_argument("ESSPostProcessor : Input Tensor must be GPU Tensor."); - } - - if (inputDisparity.getWidth() != m_networkWidth || inputDisparity.getHeight() != m_networkHeight) - { - throw std::invalid_argument( - "ESSPostProcessor : Input Tensor dimension " - "does not match network input " - "requirement"); - } - - if (inputDisparity.getDepth() != outputDisparity.getDepth()) - { - throw std::invalid_argument("ESSPostProcessor : Input/Output Tensor batchsize mismatch."); - } - - const size_t batchSize = inputDisparity.getDepth(); - if (batchSize > m_maxBatchSize) - { - throw std::invalid_argument("ESSPostProcessor : Input batchsize exceeds Max Batch size."); - } - const size_t outputWidth = outputDisparity.getWidth(); - const size_t outputHeight = 
outputDisparity.getHeight(); - - // Disparity map values are scaled based on the outputWidth/networkInputWidth ratio - const float scale = static_cast(outputWidth) / m_networkWidth; - Tensor scaledDisparity(m_scaledDisparityDevice.getWidth(), m_scaledDisparityDevice.getHeight(), - batchSize, m_scaledDisparityDevice.getData(), false); - - tensor_ops::Normalize(scaledDisparity, inputDisparity, scale, 0, stream); - if (!outputDisparity.isCPU()) - { - tensor_ops::Resize(outputDisparity, m_scaledDisparityDevice, stream); - } - else - { - resizeBuffers(outputWidth, outputHeight); - Tensor outputDisparityDevice(m_outputDisparityDevice.getWidth(), - m_outputDisparityDevice.getHeight(), batchSize, - m_outputDisparityDevice.getData(), false); - tensor_ops::Resize(outputDisparityDevice, m_scaledDisparityDevice, stream); - cvcore::Copy(outputDisparity, outputDisparityDevice, stream); - CHECK_ERROR(cudaStreamSynchronize(stream)); - } - } - - size_t m_maxBatchSize; - size_t m_networkWidth, m_networkHeight; - Tensor m_scaledDisparityDevice; - Tensor m_outputDisparityDevice; -}; - -void ESSPostProcessor::execute(Tensor &outputDisparity, const Tensor &inputDisparity, - cudaStream_t stream) -{ -#ifdef NVBENCH_ENABLE - const std::string testName = "ESSPostProcessor_batch" + std::to_string(outputDisparity.getDepth()) + "_" + - std::to_string(outputDisparity.getWidth()) + "x" + - std::to_string(outputDisparity.getHeight()) + "_GPU"; - nv::bench::Timer timerFunc = nv::bench::GPU(testName.c_str(), nv::bench::Flag::DEFAULT, stream); -#endif - m_pImpl->process(outputDisparity, inputDisparity, stream); -} - -ESSPostProcessor::ESSPostProcessor(const ModelInputParams &modelInputParams) - : m_pImpl(new ESSPostProcessor::ESSPostProcessorImpl(modelInputParams)) -{ -} - -ESSPostProcessor::~ESSPostProcessor() {} - -}} // namespace cvcore::ess diff --git a/isaac_ros_ess/gxf/ess/3dv/src/ESSPreProcess.cpp b/isaac_ros_ess/gxf/ess/3dv/src/ESSPreProcess.cpp deleted file mode 100644 index 001c528..0000000 --- a/isaac_ros_ess/gxf/ess/3dv/src/ESSPreProcess.cpp +++ /dev/null @@ -1,190 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -#include - -#include - -#ifdef NVBENCH_ENABLE -#include -#endif - -#include -#include -#include -#include - -namespace cvcore { namespace ess { - -struct ESSPreProcessor::ESSPreProcessorImpl -{ - size_t m_maxBatchSize; - size_t m_outputWidth; - size_t m_outputHeight; - PreProcessType m_processType; - ImagePreProcessingParams m_preProcessorParams; - Tensor m_resizedDeviceLeftInput; - Tensor m_resizedDeviceRightInput; - Tensor m_normalizedDeviceLeftInput; - Tensor m_normalizedDeviceRightInput; - bool m_swapRB; - - ESSPreProcessorImpl(const ImagePreProcessingParams &imgParams, const ModelInputParams &modelParams, - const ESSPreProcessorParams &essParams) - : m_maxBatchSize(modelParams.maxBatchSize) - , m_outputHeight(modelParams.inputLayerHeight) - , m_outputWidth(modelParams.inputLayerWidth) - , m_processType(essParams.preProcessType) - , m_preProcessorParams(imgParams) - { - if (imgParams.imgType != BGR_U8 && imgParams.imgType != RGB_U8) - { - throw std::invalid_argument("ESSPreProcessor : Only image types RGB_U8/BGR_U8 are supported\n"); - } - m_resizedDeviceLeftInput = {modelParams.inputLayerWidth, modelParams.inputLayerHeight, modelParams.maxBatchSize, - false}; - m_resizedDeviceRightInput = {modelParams.inputLayerWidth, modelParams.inputLayerHeight, - modelParams.maxBatchSize, false}; - m_normalizedDeviceLeftInput = {modelParams.inputLayerWidth, modelParams.inputLayerHeight, - modelParams.maxBatchSize, false}; - m_normalizedDeviceRightInput = {modelParams.inputLayerWidth, modelParams.inputLayerHeight, - modelParams.maxBatchSize, false}; - m_swapRB = imgParams.imgType != modelParams.modelInputType; - } - - void process(Tensor &outputLeft, Tensor &outputRight, - const Tensor &inputLeft, const Tensor &inputRight, cudaStream_t stream) - { - if (inputLeft.isCPU() || inputRight.isCPU() || outputLeft.isCPU() || outputRight.isCPU()) - { - throw std::invalid_argument("ESSPreProcessor : Input/Output Tensor must be GPU Tensor."); - } - - if (outputLeft.getWidth() != m_outputWidth || outputLeft.getHeight() != m_outputHeight || - outputRight.getWidth() != m_outputWidth || outputRight.getHeight() != m_outputHeight) - { - throw std::invalid_argument( - "ESSPreProcessor : Output Tensor dimension does not match network input requirement"); - } - - if (inputLeft.getWidth() != inputRight.getWidth() || inputLeft.getHeight() != inputRight.getHeight()) - { - throw std::invalid_argument("ESSPreProcessor : Input tensor dimensions don't match"); - } - - if (outputLeft.getDepth() != inputLeft.getDepth() || outputRight.getDepth() != inputRight.getDepth() || - inputLeft.getDepth() != inputRight.getDepth()) - { - throw std::invalid_argument("ESSPreProcessor : Input/Output Tensor batchsize mismatch."); - } - - if (outputLeft.getDepth() > m_maxBatchSize) - { - throw std::invalid_argument("ESSPreProcessor : Input/Output batchsize exceeds max batch size."); - } - - const size_t batchSize = inputLeft.getDepth(); - const size_t inputWidth = inputLeft.getWidth(); - const size_t inputHeight = inputLeft.getHeight(); - - if (m_processType == PreProcessType::RESIZE) - { - tensor_ops::Resize(m_resizedDeviceLeftInput, inputLeft, stream); - tensor_ops::Resize(m_resizedDeviceRightInput, inputRight, stream); - } - else - { - const float centerX = inputWidth / 2; - const float centerY = inputHeight / 2; - const float offsetX = m_outputWidth / 2; - const float offsetY = m_outputHeight / 2; - BBox srcCrop, dstCrop; - dstCrop = {0, 0, static_cast(m_outputWidth - 1), 
static_cast(m_outputHeight - 1)}; - srcCrop.xmin = std::max(0, static_cast(centerX - offsetX)); - srcCrop.ymin = std::max(0, static_cast(centerY - offsetY)); - srcCrop.xmax = std::min(static_cast(m_outputWidth - 1), static_cast(centerX + offsetX)); - srcCrop.ymax = std::min(static_cast(m_outputHeight - 1), static_cast(centerY + offsetY)); - for (size_t i = 0; i < batchSize; i++) - { - Tensor inputLeftCrop( - inputWidth, inputHeight, - const_cast(inputLeft.getData()) + i * inputLeft.getStride(TensorDimension::DEPTH), - false); - Tensor outputLeftCrop( - m_outputWidth, m_outputHeight, - m_resizedDeviceLeftInput.getData() + i * m_resizedDeviceLeftInput.getStride(TensorDimension::DEPTH), - false); - Tensor inputRightCrop( - inputWidth, inputHeight, - const_cast(inputRight.getData()) + i * inputRight.getStride(TensorDimension::DEPTH), - false); - Tensor outputRightCrop(m_outputWidth, m_outputHeight, - m_resizedDeviceRightInput.getData() + - i * m_resizedDeviceRightInput.getStride(TensorDimension::DEPTH), - false); - tensor_ops::CropAndResize(outputLeftCrop, inputLeftCrop, dstCrop, srcCrop, - tensor_ops::InterpolationType::INTERP_LINEAR, stream); - tensor_ops::CropAndResize(outputRightCrop, inputRightCrop, dstCrop, srcCrop, - tensor_ops::InterpolationType::INTERP_LINEAR, stream); - } - } - - if (m_swapRB) - { - tensor_ops::ConvertColorFormat(m_resizedDeviceLeftInput, m_resizedDeviceLeftInput, tensor_ops::BGR2RGB, - stream); - tensor_ops::ConvertColorFormat(m_resizedDeviceRightInput, m_resizedDeviceRightInput, tensor_ops::BGR2RGB, - stream); - } - - float scale[3]; - for (size_t i = 0; i < 3; i++) - { - scale[i] = m_preProcessorParams.normalization[i] / m_preProcessorParams.stdDev[i]; - } - - tensor_ops::Normalize(m_normalizedDeviceLeftInput, m_resizedDeviceLeftInput, scale, - m_preProcessorParams.pixelMean, stream); - tensor_ops::Normalize(m_normalizedDeviceRightInput, m_resizedDeviceRightInput, scale, - m_preProcessorParams.pixelMean, stream); - tensor_ops::InterleavedToPlanar(outputLeft, m_normalizedDeviceLeftInput, stream); - tensor_ops::InterleavedToPlanar(outputRight, m_normalizedDeviceRightInput, stream); - } -}; - -void ESSPreProcessor::execute(Tensor &outputLeft, Tensor &outputRight, - const Tensor &inputLeft, const Tensor &inputRight, - cudaStream_t stream) -{ -#ifdef NVBENCH_ENABLE - const std::string testName = "ESSPreProcessor_batch" + std::to_string(inputLeft.getDepth()) + "_" + - std::to_string(inputLeft.getWidth()) + "x" + std::to_string(inputLeft.getHeight()) + - "_GPU"; - nv::bench::Timer timerFunc = nv::bench::GPU(testName.c_str(), nv::bench::Flag::DEFAULT, stream); -#endif - m_pImpl->process(outputLeft, outputRight, inputLeft, inputRight, stream); -} - -ESSPreProcessor::ESSPreProcessor(const ImagePreProcessingParams &preProcessorParams, - const ModelInputParams &modelInputParams, const ESSPreProcessorParams &essParams) - : m_pImpl(new ESSPreProcessor::ESSPreProcessorImpl(preProcessorParams, modelInputParams, essParams)) -{ -} - -ESSPreProcessor::~ESSPreProcessor() {} - -}} // namespace cvcore::ess diff --git a/isaac_ros_ess/gxf/ess/CMakeLists.txt b/isaac_ros_ess/gxf/ess/CMakeLists.txt index b9a8537..cb8ddfc 100644 --- a/isaac_ros_ess/gxf/ess/CMakeLists.txt +++ b/isaac_ros_ess/gxf/ess/CMakeLists.txt @@ -23,7 +23,8 @@ endif() # Dependencies find_package(CUDAToolkit) -include(YamlCpp) +find_package(yaml-cpp) +find_package(isaac_ros_image_proc REQUIRED) find_package(GXF ${ISAAC_ROS_GXF_VERSION} MODULE REQUIRED COMPONENTS core @@ -33,118 +34,51 @@ find_package(GXF 
${ISAAC_ROS_GXF_VERSION} MODULE REQUIRED ) find_package(TENSORRT) +set(CMAKE_INCLUDE_CURRENT_DIR TRUE) + # Create extension add_library(gxf_cvcore_ess SHARED - TensorOps.cpp - ESS.cpp - extensions/ess/ESS.hpp + extensions/ess/ess.cpp + extensions/ess/components/ess_inference.cpp + extensions/ess/components/ess_inference.hpp - extensions/tensor_ops/CameraModel.cpp - extensions/tensor_ops/CameraModel.hpp - extensions/tensor_ops/ConvertColorFormat.cpp - extensions/tensor_ops/ConvertColorFormat.hpp - extensions/tensor_ops/CropAndResize.cpp - extensions/tensor_ops/CropAndResize.hpp - extensions/tensor_ops/Frame3D.cpp - extensions/tensor_ops/Frame3D.hpp - extensions/tensor_ops/ImageAdapter.cpp - extensions/tensor_ops/ImageAdapter.hpp - extensions/tensor_ops/ImageUtils.cpp - extensions/tensor_ops/ImageUtils.hpp - extensions/tensor_ops/InterleavedToPlanar.cpp - extensions/tensor_ops/InterleavedToPlanar.hpp - extensions/tensor_ops/Normalize.cpp - extensions/tensor_ops/Normalize.hpp - extensions/tensor_ops/Reshape.cpp - extensions/tensor_ops/Reshape.hpp - extensions/tensor_ops/Resize.cpp - extensions/tensor_ops/Resize.hpp - extensions/tensor_ops/TensorOperator.cpp - extensions/tensor_ops/TensorOperator.hpp - extensions/tensor_ops/TensorStream.cpp - extensions/tensor_ops/TensorStream.hpp - extensions/tensor_ops/Undistort.cpp - extensions/tensor_ops/Undistort.hpp - extensions/tensor_ops/detail/ImageAdapterTensorImpl.cpp - extensions/tensor_ops/detail/ImageAdapterTensorImpl.hpp - extensions/tensor_ops/detail/ImageAdapterVideoBufferImpl.cpp - extensions/tensor_ops/detail/ImageAdapterVideoBufferImpl.hpp + extensions/ess/inference/ESS.cpp + extensions/ess/inference/ESS.h + extensions/ess/inference/ESSPostProcess.cpp + extensions/ess/inference/ESSPreProcess.cpp ) +target_include_directories(gxf_cvcore_ess PRIVATE ${isaac_ros_image_proc_INCLUDE_DIRS}) -set(CMAKE_INCLUDE_CURRENT_DIR TRUE) -target_include_directories(gxf_cvcore_ess PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR}/cvcore/include - ${CMAKE_CURRENT_SOURCE_DIR}/3dv/include) - -add_library(cvcore_ess STATIC - # Tensorops - cvcore/src/tensor_ops/ArithmeticOperations.cpp - cvcore/src/tensor_ops/BBoxUtils.cpp - cvcore/src/tensor_ops/ColorConversions.cpp - cvcore/src/tensor_ops/DBScan.cpp - cvcore/src/tensor_ops/Errors.cpp - cvcore/src/tensor_ops/Filters.cpp - cvcore/src/tensor_ops/Filters.h - cvcore/src/tensor_ops/FusedOperations.cpp - cvcore/src/tensor_ops/GeometryTransforms.cpp - cvcore/src/tensor_ops/IImageWarp.cpp - cvcore/src/tensor_ops/NppUtils.cpp - cvcore/src/tensor_ops/NppUtils.h - cvcore/src/tensor_ops/OneEuroFilter.cpp - cvcore/src/tensor_ops/TensorOperators.cpp - - # Core - cvcore/src/core/cvcore/Array.cpp - cvcore/src/core/cvcore/Dummy.cu - cvcore/src/core/cvcore/MathTypes.cpp - cvcore/src/core/cvcore/Tensor.cpp - cvcore/src/core/utility/CVError.cpp - cvcore/src/core/utility/Instrumentation.cpp - cvcore/src/core/utility/Memory.cpp - cvcore/src/core/utility/ProfileUtils.cpp - +add_library(corelib STATIC # Inferencer (ESS only) - cvcore/src/inferencer/tensorrt/TensorRTInferencer.cpp - cvcore/src/inferencer/Inferencer.cpp - cvcore/src/inferencer/Errors.cpp - cvcore/src/inferencer/tensorrt/TensorRTUtils.h - cvcore/src/inferencer/tensorrt/TensorRTUtils.cpp - cvcore/src/inferencer/tensorrt/TensorRTInferencer.h - - # TRTBackend - cvcore/src/trtbackend/TRTBackend.cpp + gems/dnn_inferencer/inferencer/TensorRTInferencer.cpp + gems/dnn_inferencer/inferencer/TensorRTUtils.h + gems/dnn_inferencer/inferencer/TensorRTUtils.cpp + 
gems/dnn_inferencer/inferencer/TensorRTInferencer.h + gems/dnn_inferencer/inferencer/Inferencer.cpp + gems/dnn_inferencer/inferencer/Errors.cpp ) +target_include_directories(corelib PRIVATE ${isaac_ros_image_proc_INCLUDE_DIRS}) -target_include_directories(cvcore_ess PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/cvcore/include) -target_compile_options(cvcore_ess PUBLIC -fPIC) -target_link_libraries(cvcore_ess PUBLIC - CUDA::cudart - CUDA::nppc - CUDA::nppial - CUDA::nppicc - CUDA::nppidei - CUDA::nppif - CUDA::nppig - CUDA::nppisu - TENSORRT::nvinfer -) - -add_library(ess_3dv STATIC - 3dv/src/ESS.cpp - 3dv/src/ESSPostProcess.cpp - 3dv/src/ESSPreProcess.cpp -) -target_include_directories(ess_3dv PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/3dv/include) -target_compile_options(ess_3dv PUBLIC -fPIC) -target_link_libraries(ess_3dv PUBLIC - cvcore_ess -) -target_link_libraries(gxf_cvcore_ess - PUBLIC +target_link_libraries(corelib PUBLIC + GXF::core GXF::cuda GXF::multimedia + CUDA::cudart + CUDA::nppc + CUDA::nppial + CUDA::nppicc + CUDA::nppidei + CUDA::nppif + CUDA::nppig + CUDA::nppisu + TENSORRT::nvinfer yaml-cpp - PRIVATE - cvcore_ess - ess_3dv ) + +target_compile_options(gxf_cvcore_ess PUBLIC -fPIC) + +target_link_libraries(gxf_cvcore_ess + corelib + isaac_ros_image_proc::gxf_tensorops +) \ No newline at end of file diff --git a/isaac_ros_ess/gxf/ess/ESS.cpp b/isaac_ros_ess/gxf/ess/ESS.cpp deleted file mode 100644 index 593e34c..0000000 --- a/isaac_ros_ess/gxf/ess/ESS.cpp +++ /dev/null @@ -1,325 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -#include "extensions/ess/ESS.hpp" - -#include -#include "gxf/cuda/cuda_stream_id.hpp" -#include "gxf/cuda/cuda_stream_pool.hpp" -#include "gxf/multimedia/video.hpp" -#include "gxf/std/tensor.hpp" -#include "gxf/std/timestamp.hpp" - -namespace nvidia { -namespace cvcore { - -namespace detail { - -// Function to bind a cuda stream with cid into downstream message -gxf_result_t BindCudaStream(gxf::Entity& message, gxf_uid_t cid) { - if (cid == kNullUid) { - GXF_LOG_ERROR("stream_cid is null"); - return GXF_FAILURE; - } - auto output_stream_id = message.add("stream"); - if (!output_stream_id) { - GXF_LOG_ERROR("failed to add cudastreamid."); - return GXF_FAILURE; - } - output_stream_id.value()->stream_cid = cid; - return GXF_SUCCESS; -} - -// Function to record a new cuda event -gxf_result_t RecordCudaEvent(gxf::Entity& message, gxf::Handle& stream) { - // Create a new event - cudaEvent_t cuda_event; - cudaEventCreateWithFlags(&cuda_event, 0); - gxf::CudaEvent event; - auto ret = event.initWithEvent(cuda_event, stream->dev_id(), [](auto) {}); - if (!ret) { - GXF_LOG_ERROR("failed to init cuda event"); - return GXF_FAILURE; - } - // Record the event - // Can define []() { GXF_LOG_DEBUG("tensorops event synced"); } as callback func for debug purpose - ret = stream->record(event.event().value(), - [event = cuda_event, entity = message.clone().value()](auto) { cudaEventDestroy(event); }); - if (!ret) { - GXF_LOG_ERROR("record event failed"); - return ret.error(); - } - return GXF_SUCCESS; -} - -} // namespace detail - -gxf_result_t ESS::registerInterface(gxf::Registrar* registrar) { - gxf::Expected result; - - result &= registrar->parameter(left_image_name_, "left_image_name", "The name of the left image to be received", "", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(right_image_name_, "right_image_name", "The name of the right image to be received", - "", gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(output_name_, "output_name", "The name of the tensor to be passed to next node", ""); - result &= registrar->parameter(stream_pool_, "stream_pool", "The Cuda Stream pool for allocate cuda stream", "", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(pool_, "pool", "Memory pool for allocating output data", ""); - result &= registrar->parameter(left_image_receiver_, "left_image_receiver", "Receiver to get the left image", ""); - result &= registrar->parameter(right_image_receiver_, "right_image_receiver", "Receiver to get the right image", ""); - result &= registrar->parameter(output_transmitter_, "output_transmitter", "Transmitter to send the data", ""); - result &= registrar->parameter(output_adapter_, "output_adapter", "Adapter to send output data", ""); - - result &= registrar->parameter(image_type_, "image_type", "Type of input image: BGR_U8 or RGB_U8", ""); - result &= registrar->parameter(pixel_mean_, "pixel_mean", "The mean for each channel", ""); - result &= registrar->parameter(normalization_, "normalization", "The normalization for each channel", ""); - result &= - registrar->parameter(standard_deviation_, "standard_deviation", "The standard deviation for each channel", ""); - - result &= registrar->parameter(max_batch_size_, "max_batch_size", "The max batch size to run inference on", ""); - result &= registrar->parameter(input_layer_width_, "input_layer_width", "The model 
input layer width", ""); - result &= registrar->parameter(input_layer_height_, "input_layer_height", "The model input layer height", ""); - result &= registrar->parameter(model_input_type_, "model_input_type", "The model input image: BGR_U8 or RGB_U8", ""); - - result &= registrar->parameter(engine_file_path_, "engine_file_path", "The path to the serialized TRT engine", ""); - result &= registrar->parameter(input_layers_name_, "input_layers_name", "The names of the input layers", ""); - result &= registrar->parameter(output_layers_name_, "output_layers_name", "The names of the output layers", ""); - - result &= registrar->parameter(preprocess_type_, "preprocess_type", - "The type of ESS preprocessing: RESIZE / CENTER_CROP", ""); - result &= registrar->parameter(output_width_, "output_width", "The width of output result", "", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(output_height_, "output_height", "The height of output result", "", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(timestamp_policy_, "timestamp_policy", - "Input channel to get timestamp 0(left)/1(right)", "", 0); - - return gxf::ToResultCode(result); -} - -gxf_result_t ESS::start() { - // Allocate cuda stream using stream pool if necessary - if (stream_pool_.try_get()) { - auto stream = stream_pool_.try_get().value()->allocateStream(); - if (!stream) { - GXF_LOG_ERROR("allocating stream failed."); - return GXF_FAILURE; - } - cuda_stream_ = std::move(stream.value()); - if (!cuda_stream_->stream()) { - GXF_LOG_ERROR("allocated stream is not initialized."); - return GXF_FAILURE; - } - } - // Setting image pre-processing params for ESS - const auto& pixel_mean_vec = pixel_mean_.get(); - const auto& normalization_vec = normalization_.get(); - const auto& standard_deviation_vec = standard_deviation_.get(); - if (pixel_mean_vec.size() != 3 || normalization_vec.size() != 3 || standard_deviation_vec.size() != 3) { - GXF_LOG_ERROR("Invalid preprocessing params."); - return GXF_FAILURE; - } - - if (image_type_.get() == "BGR_U8") { - preProcessorParams.imgType = ::cvcore::BGR_U8; - } else if (image_type_.get() == "RGB_U8") { - preProcessorParams.imgType = ::cvcore::RGB_U8; - } else { - GXF_LOG_INFO("Wrong input image type. BGR_U8 and RGB_U8 are only supported."); - return GXF_FAILURE; - } - std::copy(pixel_mean_vec.begin(), pixel_mean_vec.end(), preProcessorParams.pixelMean); - std::copy(normalization_vec.begin(), normalization_vec.end(), preProcessorParams.normalization); - std::copy(standard_deviation_vec.begin(), standard_deviation_vec.end(), preProcessorParams.stdDev); - - // Setting model input params for ESS - modelInputParams.maxBatchSize = max_batch_size_.get(); - modelInputParams.inputLayerWidth = input_layer_width_.get(); - modelInputParams.inputLayerHeight = input_layer_height_.get(); - if (model_input_type_.get() == "BGR_U8") { - modelInputParams.modelInputType = ::cvcore::BGR_U8; - } else if (model_input_type_.get() == "RGB_U8") { - modelInputParams.modelInputType = ::cvcore::RGB_U8; - } else { - GXF_LOG_INFO("Wrong model input type. 
BGR_U8 and RGB_U8 are only supported."); - return GXF_FAILURE; - } - - // Setting inference params for ESS - inferenceParams = {engine_file_path_.get(), input_layers_name_.get(), output_layers_name_.get()}; - - // Setting extra params for ESS - if (preprocess_type_.get() == "RESIZE") { - extraParams = {::cvcore::ess::PreProcessType::RESIZE}; - } else if (preprocess_type_.get() == "CENTER_CROP") { - extraParams = {::cvcore::ess::PreProcessType::CENTER_CROP}; - } else { - GXF_LOG_ERROR("Invalid preprocessing type."); - return GXF_FAILURE; - } - - // Setting ESS object with the provided params - objESS.reset(new ::cvcore::ess::ESS(preProcessorParams, modelInputParams, inferenceParams, extraParams)); - - return GXF_SUCCESS; -} - -gxf_result_t ESS::tick() { - // Get a CUDA stream for execution - cudaStream_t cuda_stream = 0; - if (!cuda_stream_.is_null()) { - cuda_stream = cuda_stream_->stream().value(); - } - // Receiving the data - auto inputLeftMessage = left_image_receiver_->receive(); - if (!inputLeftMessage) { - return GXF_FAILURE; - } - if (cuda_stream != 0) { - detail::RecordCudaEvent(inputLeftMessage.value(), cuda_stream_); - auto inputLeftStreamId = inputLeftMessage.value().get("stream"); - if (inputLeftStreamId) { - auto inputLeftStream = gxf::Handle::Create(inputLeftStreamId.value().context(), - inputLeftStreamId.value()->stream_cid); - // NOTE: This is an expensive call. It will halt the current CPU thread until all events - // previously associated with the stream are cleared - if (!inputLeftStream.value()->syncStream()) { - GXF_LOG_ERROR("sync left stream failed."); - return GXF_FAILURE; - } - } - } - - auto inputRightMessage = right_image_receiver_->receive(); - if (!inputRightMessage) { - return GXF_FAILURE; - } - if (cuda_stream != 0) { - detail::RecordCudaEvent(inputRightMessage.value(), cuda_stream_); - auto inputRightStreamId = inputRightMessage.value().get("stream"); - if (inputRightStreamId) { - auto inputRightStream = gxf::Handle::Create(inputRightStreamId.value().context(), - inputRightStreamId.value()->stream_cid); - // NOTE: This is an expensive call. It will halt the current CPU thread until all events - // previously associated with the stream are cleared - if (!inputRightStream.value()->syncStream()) { - GXF_LOG_ERROR("sync right stream failed."); - return GXF_FAILURE; - } - } - } - - auto maybeLeftName = left_image_name_.try_get(); - auto leftInputBuffer = inputLeftMessage.value().get(maybeLeftName ? maybeLeftName.value().c_str() : nullptr); - if (!leftInputBuffer) { - return GXF_FAILURE; - } - auto maybeRightName = right_image_name_.try_get(); - auto rightInputBuffer = inputRightMessage.value().get(maybeRightName ? maybeRightName.value().c_str() : nullptr); - if (!rightInputBuffer) { - return GXF_FAILURE; - } - if (leftInputBuffer.value()->storage_type() != gxf::MemoryStorageType::kDevice || - rightInputBuffer.value()->storage_type() != gxf::MemoryStorageType::kDevice) { - GXF_LOG_ERROR("input images must be on GPU."); - return GXF_FAILURE; - } - - const auto& leftBufferInfo = leftInputBuffer.value()->video_frame_info(); - const auto& rightBufferInfo = rightInputBuffer.value()->video_frame_info(); - if (leftBufferInfo.width != rightBufferInfo.width || leftBufferInfo.height != rightBufferInfo.height || - leftBufferInfo.color_format != rightBufferInfo.color_format) { - GXF_LOG_ERROR("left/right images mismatch."); - return GXF_FAILURE; - } - const size_t outputWidth = output_width_.try_get() ? 
output_width_.try_get().value() : leftBufferInfo.width; - const size_t outputHeight = output_height_.try_get() ? output_height_.try_get().value() : leftBufferInfo.height; - - // Creating GXF Tensor or VideoBuffer to hold the data to be transmitted - gxf::Expected outputMessage = gxf::Entity::New(context()); - if (!outputMessage) { - return outputMessage.error(); - } - auto error = output_adapter_.get()->AddImageToMessage<::cvcore::ImageType::Y_F32>( - outputMessage.value(), outputWidth, outputHeight, pool_.get(), false, output_name_.get().c_str()); - if (error != GXF_SUCCESS) { - return GXF_FAILURE; - } - auto outputImage = output_adapter_.get()->WrapImageFromMessage<::cvcore::ImageType::Y_F32>( - outputMessage.value(), output_name_.get().c_str()); - if (!outputImage) { - return GXF_FAILURE; - } - - // Creating CVCore Tensors to hold the input and output data - ::cvcore::Tensor<::cvcore::NHWC, ::cvcore::C1, ::cvcore::F32> outputImageDevice(outputWidth, outputHeight, 1, - outputImage.value().getData(), false); - - // Running the inference - auto inputColorFormat = leftBufferInfo.color_format; - if (inputColorFormat == gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB || - inputColorFormat == gxf::VideoFormat::GXF_VIDEO_FORMAT_BGR) { - ::cvcore::Tensor<::cvcore::NHWC, ::cvcore::C3, ::cvcore::U8> leftImageDevice( - leftBufferInfo.width, leftBufferInfo.height, 1, reinterpret_cast(leftInputBuffer.value()->pointer()), - false); - ::cvcore::Tensor<::cvcore::NHWC, ::cvcore::C3, ::cvcore::U8> rightImageDevice( - leftBufferInfo.width, leftBufferInfo.height, 1, reinterpret_cast(rightInputBuffer.value()->pointer()), - false); - objESS->execute(outputImageDevice, leftImageDevice, rightImageDevice, cuda_stream); - } else if (inputColorFormat == gxf::VideoFormat::GXF_VIDEO_FORMAT_R32_G32_B32 || - inputColorFormat == gxf::VideoFormat::GXF_VIDEO_FORMAT_B32_G32_R32) { - ::cvcore::Tensor<::cvcore::NCHW, ::cvcore::C3, ::cvcore::F32> leftImageDevice( - leftBufferInfo.width, leftBufferInfo.height, 1, reinterpret_cast(leftInputBuffer.value()->pointer()), - false); - ::cvcore::Tensor<::cvcore::NCHW, ::cvcore::C3, ::cvcore::F32> rightImageDevice( - leftBufferInfo.width, leftBufferInfo.height, 1, reinterpret_cast(rightInputBuffer.value()->pointer()), - false); - objESS->execute(outputImageDevice, leftImageDevice, rightImageDevice, cuda_stream); - } else { - GXF_LOG_ERROR("invalid input image type."); - return GXF_FAILURE; - } - - // Allocate a cuda event that can be used to record on each tick - if (!cuda_stream_.is_null()) { - detail::BindCudaStream(outputMessage.value(), cuda_stream_.cid()); - detail::RecordCudaEvent(outputMessage.value(), cuda_stream_); - } - - // Pass down timestamp if necessary - auto maybeDaqTimestamp = timestamp_policy_.get() == 0 ? 
inputLeftMessage.value().get() - : inputRightMessage.value().get(); - if (maybeDaqTimestamp) { - auto outputTimestamp = outputMessage.value().add(maybeDaqTimestamp.value().name()); - if (!outputTimestamp) { - return outputTimestamp.error(); - } - *outputTimestamp.value() = *maybeDaqTimestamp.value(); - } - - // Send the data - output_transmitter_->publish(outputMessage.value()); - return GXF_SUCCESS; -} - -gxf_result_t ESS::stop() { - objESS.reset(nullptr); - return GXF_SUCCESS; -} - -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/TensorOps.cpp b/isaac_ros_ess/gxf/ess/TensorOps.cpp deleted file mode 100644 index 4740f02..0000000 --- a/isaac_ros_ess/gxf/ess/TensorOps.cpp +++ /dev/null @@ -1,95 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#include "extensions/ess/ESS.hpp" -#include "extensions/tensor_ops/CameraModel.hpp" -#include "extensions/tensor_ops/ConvertColorFormat.hpp" -#include "extensions/tensor_ops/CropAndResize.hpp" -#include "extensions/tensor_ops/Frame3D.hpp" -#include "extensions/tensor_ops/ImageAdapter.hpp" -#include "extensions/tensor_ops/InterleavedToPlanar.hpp" -#include "extensions/tensor_ops/Normalize.hpp" -#include "extensions/tensor_ops/Reshape.hpp" -#include "extensions/tensor_ops/Resize.hpp" -#include "extensions/tensor_ops/TensorOperator.hpp" -#include "extensions/tensor_ops/TensorStream.hpp" -#include "extensions/tensor_ops/Undistort.hpp" - -#include "gxf/std/extension_factory_helper.hpp" - -GXF_EXT_FACTORY_BEGIN() -GXF_EXT_FACTORY_SET_INFO( - 0x6eae64ff97a94d9b, 0xb324f85e6a98a75a, "NvCvTensorOpsExtension", - "Generic CVCORE tensor_ops interfaces", "Nvidia_Gxf", "3.1.0", "LICENSE"); - -GXF_EXT_FACTORY_ADD( - 0xd073a92344ba4b82, 0xbd0f18f4996048e8, nvidia::cvcore::tensor_ops::CameraModel, - nvidia::gxf::Component, - "Construct Camera distortion model / Camera intrinsic compatible with CVCORE"); - -GXF_EXT_FACTORY_ADD( - 0x6c9419223e4b4c2c, 0x899a4d65279c6508, nvidia::cvcore::tensor_ops::Frame3D, - nvidia::gxf::Component, - "Construct Camera extrinsic compatible with CVCORE"); - -GXF_EXT_FACTORY_ADD( - 0xd94385e5b35b4635, 0x9adb0d214a3865f7, nvidia::cvcore::tensor_ops::TensorStream, - nvidia::gxf::Component, "Wrapper of CVCORE ITensorOperatorStream/ITensorOperatorContext"); - -GXF_EXT_FACTORY_ADD( - 0xd0c4ddad486a4a92, 0xb69c8a5304b205ea, nvidia::cvcore::tensor_ops::ImageAdapter, - nvidia::gxf::Component, "Utility component for conversion between message and cvcore image type"); - -GXF_EXT_FACTORY_ADD( - 0xadebc792bd0b4a57, 0x99c1405fd2ea0728, nvidia::cvcore::tensor_ops::StreamUndistort, - nvidia::gxf::Codelet, "Codelet for stream image undistortion in tensor_ops"); - -GXF_EXT_FACTORY_ADD( - 0xa58141ac7eca4ea6, 0x9b545446fe379a12, nvidia::cvcore::tensor_ops::Resize, nvidia::gxf::Codelet, - "Codelet for image resizing in 
tensor_ops"); - -GXF_EXT_FACTORY_ADD( - 0xeb8b5f5b36d44b49, 0x81f959fd28e6f678, nvidia::cvcore::tensor_ops::StreamResize, - nvidia::gxf::Codelet, "Codelet for stream image resizing in tensor_ops"); - -GXF_EXT_FACTORY_ADD( - 0x4a7ff422de3841bd, 0x9e743ac10d9294b7, nvidia::cvcore::tensor_ops::CropAndResize, - nvidia::gxf::Codelet, "Codelet for crop and resizing operation in tensor_ops"); - -GXF_EXT_FACTORY_ADD( - 0x7018f0b9034c462c, 0xa9fbaf7ee012974a, nvidia::cvcore::tensor_ops::Normalize, - nvidia::gxf::Codelet, - "Codelet for image normalization in tensor_ops"); - -GXF_EXT_FACTORY_ADD( - 0x269d4237f3c3479e, 0xbcca9ecc44c71a71, nvidia::cvcore::tensor_ops::InterleavedToPlanar, - nvidia::gxf::Codelet, "Codelet for convert interleaved image to planar image in tensor_ops"); - -GXF_EXT_FACTORY_ADD( - 0xfc4d7b4d8fcc4dab, 0xa286056e0fcafa79, nvidia::cvcore::tensor_ops::ConvertColorFormat, - nvidia::gxf::Codelet, "Codelet for image color conversion in tensor_ops"); - -GXF_EXT_FACTORY_ADD( - 0x5ab4a4d8f7a34553, 0xa90be52660b076fe, nvidia::cvcore::tensor_ops::StreamConvertColorFormat, - nvidia::gxf::Codelet, "Codelet for stream image color conversion in tensor_ops"); - -GXF_EXT_FACTORY_ADD( - 0x26789b7d5a8d4e85, 0x86b845ec5f4cd12b, nvidia::cvcore::tensor_ops::Reshape, nvidia::gxf::Codelet, - "Codelet for image reshape in tensor_ops"); -GXF_EXT_FACTORY_ADD( - 0x1aa1eea914344aff, 0x97fddaaedb594121, nvidia::cvcore::ESS, nvidia::gxf::Codelet, - "ESS GXF Extension"); -GXF_EXT_FACTORY_END() diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Array.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Array.h deleted file mode 100644 index c9f23d8..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Array.h +++ /dev/null @@ -1,386 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_ARRAY_H -#define CVCORE_ARRAY_H - -#include -#include -#include - -namespace cvcore { - -/** - * Base implementation of Array. - */ -class ArrayBase -{ -public: - /** - * Constructor of a non-owning arrays. - * @param capacity capacity of the array. - * @param elemSize byte size of each element. - * @param dataPtr data pointer to the raw source array. - * @param isCPU whether to allocate the array on CPU or GPU. - */ - ArrayBase(std::size_t capacity, std::size_t elemSize, void *dataPtr, bool isCPU); - - /** - * Constructor of a memory-owning arrays - * @param capacity capacity of the array. - * @param elemSize byte size of each element. - * @param isCPU whether to allocate the array on CPU or GPU. - */ - ArrayBase(std::size_t capacity, std::size_t elemSize, bool isCPU); - - /** - * Destructor of ArrayBase. - */ - ~ArrayBase(); - - /** - * ArrayBase is non-copyable. - */ - ArrayBase(const ArrayBase &) = delete; - - /** - * ArrayBase is non-copyable. 
- */ - ArrayBase &operator=(const ArrayBase &) = delete; - - /** - * Move constructor of ArrayBase. - */ - ArrayBase(ArrayBase &&); - - /** - * Move assignment operator of ArrayBase. - */ - ArrayBase &operator=(ArrayBase &&); - - /** - * Get the pointer to specified index. - * @param idx element index. - * @return pointer to the specified element. - */ - void *getElement(int idx) const; - - /** - * Get the size of the array. - * @return size of the array. - */ - std::size_t getSize() const; - - /** - * Get the capacity of the array. - * @return size of the array. - */ - std::size_t getCapacity() const; - - /** - * Get the size of each element. - * @return size of each element. - */ - std::size_t getElementSize() const; - - /** - * Set the size of the array. - * @param size size of the array. - */ - void setSize(std::size_t size); - - /** - * Get the flag whether the array is CPU or GPU array. - * @return whether the array is CPU array. - */ - bool isCPU() const; - - /** - * Get the flag whether the array is owning memory space. - * @return whether the array owns memory. - */ - bool isOwning() const; - - /** - * Get the raw pointer to the array data. - * @return void pointer to the first element of the array. - */ - void *getData() const; - -private: - ArrayBase(); - - void *m_data; - std::size_t m_size; - std::size_t m_capacity; - std::size_t m_elemSize; - bool m_isOwning; - bool m_isCPU; -}; - -/** - * Implementation of Array class. - * @tparam T type of element in array. - */ -template -class Array : public ArrayBase -{ -public: - /** - * Default constructor of an array. - */ - Array() - : ArrayBase{0, sizeof(T), nullptr, true} - { - } - - /** - * Constructor of a non-owning array. - * @param size size of the array. - * @param capacity capacity of the array. - * @param dataPtr data pointer to the raw source array. - * @param isCPU whether to allocate array on CPU or GPU. - */ - Array(std::size_t size, std::size_t capacity, void *dataPtr, bool isCPU = true) - : ArrayBase{capacity, sizeof(T), dataPtr, isCPU} - { - ArrayBase::setSize(size); - } - - /** - * Constructor of a memory-owning array. - * @param capacity capacity of the array. - * @param isCPU whether to allocate array on CPU or GPU. - */ - Array(std::size_t capacity, bool isCPU = true) - : ArrayBase{capacity, sizeof(T), isCPU} - { - } - - /** - * Destructor of the Array. - */ - ~Array() - { - // call resize here such that CPU-based destructor - // will call destructors of the objects stored - // in the array before deallocating the storage - setSize(0); - } - - /** - * Array is non-copyable. - */ - Array(const Array &) = delete; - - /** - * Array is non-copyable. - */ - Array &operator=(const Array &) = delete; - - /** - * Move constructor of Array. - */ - Array(Array &&t) - : Array() - { - *this = std::move(t); - } - - /** - * Move assignment operator of Array. - */ - Array &operator=(Array &&t) - { - static_cast(*this) = std::move(t); - return *this; - } - - /** - * Set size of the Array. - * @param size size of the Array. - */ - void setSize(std::size_t size) - { - const std::size_t oldSize = getSize(); - ArrayBase::setSize(size); - if (isCPU()) - { - // shrinking case - for (std::size_t i = size; i < oldSize; ++i) - { - reinterpret_cast(getElement(i))->~T(); - } - // expanding case - for (std::size_t i = oldSize; i < size; ++i) - { - new (getElement(i)) T; - } - } - } - - /** - * Const array index operator. - * @param idx index of element. - * @return const reference to the specified element. 
- */ - const T &operator[](int idx) const - { - assert(idx >= 0 && idx < getSize()); - return *reinterpret_cast(getElement(idx)); - } - - /** - * Array index operator. - * @param idx index of element. - * @return reference to the specified element. - */ - T &operator[](int idx) - { - assert(idx >= 0 && idx < getSize()); - return *reinterpret_cast(getElement(idx)); - } -}; - -/** - * Implementation of ArrayN class. - * @tparam T type of element in array. - * @tparam N capacity of array. - */ -template -class ArrayN : public ArrayBase -{ -public: - /** - * Default constructor of ArrayN (create an owning Tensor with capacity N). - */ - ArrayN() - : ArrayBase{N, sizeof(T), true} - { - setSize(N); - } - - /** - * Constructor of a non-owning ArrayN. - * @param size size of the array. - * @param dataPtr data pointer to the raw source array. - * @param isCPU whether to allocate array on CPU or GPU. - */ - ArrayN(std::size_t size, void *dataPtr, bool isCPU = true) - : ArrayBase{N, sizeof(T), dataPtr, isCPU} - { - ArrayBase::setSize(size); - } - - /** - * Constructor of a memory-owning ArrayN. - * @param isCPU whether to allocate array on CPU or GPU. - */ - ArrayN(bool isCPU) - : ArrayBase{N, sizeof(T), isCPU} - { - setSize(N); - } - - /** - * Destructor of the ArrayN. - */ - ~ArrayN() - { - // call resize here such that CPU-based destructor - // will call destructors of the objects stored - // in the array before deallocating the storage - setSize(0); - } - - /** - * ArrayN is non-copyable. - */ - ArrayN(const ArrayN &) = delete; - - /** - * ArrayN is non-copyable. - */ - ArrayN &operator=(const ArrayN &) = delete; - - /** - * Move constructor of ArrayN. - */ - ArrayN(ArrayN &&t) - : ArrayN() - { - *this = std::move(t); - } - - /** - * Move assignment operator of ArrayN. - */ - ArrayN &operator=(ArrayN &&t) - { - static_cast(*this) = std::move(t); - return *this; - } - - /** - * Set size of the ArrayN. - * @param size size of the ArrayN. - */ - void setSize(std::size_t size) - { - const std::size_t oldSize = getSize(); - ArrayBase::setSize(size); - if (isCPU()) - { - // shrinking case - for (std::size_t i = size; i < oldSize; ++i) - { - reinterpret_cast(getElement(i))->~T(); - } - // expanding case - for (std::size_t i = oldSize; i < size; ++i) - { - new (getElement(i)) T; - } - } - } - - /** - * Const ArrayN index operator. - * @param idx index of element. - * @return const reference to the specified element. - */ - const T &operator[](int idx) const - { - assert(idx >= 0 && idx < getSize()); - return *reinterpret_cast(getElement(idx)); - } - - /** - * ArrayN index operator. - * @param idx index of element. - * @return reference to the specified element. - */ - T &operator[](int idx) - { - assert(idx >= 0 && idx < getSize()); - return *reinterpret_cast(getElement(idx)); - } -}; - -} // namespace cvcore - -#endif // CVCORE_ARRAY_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/BBox.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/BBox.h deleted file mode 100644 index 93100d3..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/BBox.h +++ /dev/null @@ -1,142 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_BBOX_H -#define CVCORE_BBOX_H - -#include -#include -#include - -namespace cvcore { - -/** - * A struct. - * Structure used to store bounding box. - */ -struct BBox -{ - int xmin{0}; /**< minimum x coordinate. */ - int ymin{0}; /**< minimum y coordinate. */ - int xmax{0}; /**< maximum x coordinate. */ - int ymax{0}; /**< maximum y coordinate. */ - - /** - * Clamp a bounding box based on a restricting clamp box - * @param Clamping bounding box (xmin, ymin, xmax, ymax) - * @return Clamped bounding box - */ - BBox clamp(const BBox &clampBox) const - { - BBox outbox; - outbox.xmin = std::max(clampBox.xmin, xmin); - outbox.xmax = std::min(clampBox.xmax, xmax); - outbox.ymin = std::max(clampBox.ymin, ymin); - outbox.ymax = std::min(clampBox.ymax, ymax); - return outbox; - } - - /** - * @return Width of the bounding box - */ - size_t getWidth() const - { - return xmax - xmin; - } - - /** - * @return Height of the bounding box - */ - size_t getHeight() const - { - return ymax - ymin; - } - - /** - * Checks if the bounding box is valid. - */ - bool isValid() const - { - return (xmin < xmax) && (ymin < ymax) && (getWidth() > 0) && (getHeight() > 0); - } - - /** - * Returns the center of the bounding box - * @return X,Y coordinate tuple - */ - std::pair getCenter() const - { - int centerX = xmin + getWidth() / 2; - int centerY = ymin + getHeight() / 2; - return std::pair(centerX, centerY); - } - - /** - * Scales bounding box based along the width and height retaining the same center. - * @param Scale in X direction along the width - * @param Scale in Y direction along the height - * @return Scaled bounding box - */ - BBox scale(float scaleW, float scaleH) const - { - auto center = getCenter(); - float newW = getWidth() * scaleW; - float newH = getHeight() * scaleH; - BBox outbox; - outbox.xmin = center.first - newW / 2; - outbox.xmax = center.first + newW / 2; - outbox.ymin = center.second - newH / 2; - outbox.ymax = center.second + newH / 2; - - return outbox; - } - - /** - * Resizes bounding box to a square bounding box based on - * the longest edge and clamps the bounding box based on the limits provided. 
- * @param Clamping bounding box (xmin, ymin, xmax, ymax) - * @return Sqaure bounding box - */ - BBox squarify(const BBox &clampBox) const - { - size_t w = getWidth(); - size_t h = getHeight(); - - BBox clampedBox1 = clamp(clampBox); - if (!clampedBox1.isValid()) - { - throw std::range_error("Invalid bounding box generated\n"); - } - float scaleW = static_cast(std::max(w, h)) / w; - float scaleH = static_cast(std::max(w, h)) / h; - BBox scaledBBox = clampedBox1.scale(scaleW, scaleH); - BBox clampedBox2 = scaledBBox.clamp(clampBox); - if (!clampedBox2.isValid()) - { - throw std::range_error("Invalid bounding box generated\n"); - } - size_t newW = clampedBox2.getWidth(); - size_t newH = clampedBox2.getHeight(); - size_t minW = std::min(newH, newW); - clampedBox2.ymax = clampedBox2.ymin + minW; - clampedBox2.xmax = clampedBox2.xmin + minW; - return clampedBox2; - } -}; - -} // namespace cvcore -#endif // CVCORE_BBOX_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/CVError.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/CVError.h deleted file mode 100644 index 82c16c1..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/CVError.h +++ /dev/null @@ -1,116 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_CVERROR_H -#define CVCORE_CVERROR_H - -#include -#include -#include -#include -#include - -#include - -namespace cvcore { - -// CVCORE ERROR CODES -// ----------------------------------------------------------------------------- -// Defining the CVCORE Error Codes on std::error_condition -// std::error_condition creates a set of sub-system independent codes which may -// be used to describe ANY downstream error in a broad sense. An std::error_code -// is defined within the sub-system context (i.e. tensor_ops, trtbackend, ...) -// which is mapped to the cvcore::ErrorCode. -// As an example, cvcore::ErrorCode -1 may not ABSOLUTELY mean the same as -// tensor_ops::FaultCode -1, but does mean the same as tensor_ops:FaultCode 4. -// Thus, tensor_ops::FaultCode 4 needs to be mapped to cvcore::ErrorCode -1. 
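// [Editorial sketch - not part of the original file removed by this patch]
// A minimal illustration of the error-code mapping described in the comment
// above, assuming a hypothetical downstream enum tensor_ops_example::FaultCode
// with its own std::error_category. The category's default_error_condition()
// is what translates subsystem-specific codes into the subsystem-independent
// cvcore::ErrorCode conditions declared just below, so callers can compare
// against cvcore::ErrorCode no matter which module actually failed.

#include <string>
#include <system_error>

namespace tensor_ops_example {

// Hypothetical subsystem fault codes; value 4 plays the role of the
// "FaultCode 4" mentioned in the comment above.
enum class FaultCode : int { OK = 0, BAD_PARAM = 4 };

class FaultCategory : public std::error_category {
public:
    const char *name() const noexcept override { return "tensor_ops_example"; }
    std::string message(int code) const override { return "fault code " + std::to_string(code); }
    // Map each subsystem code onto the broad cvcore::ErrorCode condition.
    std::error_condition default_error_condition(int code) const noexcept override {
        switch (static_cast<FaultCode>(code)) {
            case FaultCode::OK:        return cvcore::make_error_condition(cvcore::ErrorCode::SUCCESS);
            case FaultCode::BAD_PARAM: return cvcore::make_error_condition(cvcore::ErrorCode::INVALID_ARGUMENT);
            default:                   return cvcore::make_error_condition(cvcore::ErrorCode::SYSTEM_ERROR);
        }
    }
};

}  // namespace tensor_ops_example

// With such a category installed, an std::error_code carrying FaultCode 4
// compares equal to cvcore::ErrorCode::INVALID_ARGUMENT (the
// is_error_condition_enum specialization below enables that comparison),
// which is exactly the cross-subsystem equivalence the comment describes.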
-enum class ErrorCode : std::int32_t -{ - SUCCESS = 0, - NOT_READY, - NOT_IMPLEMENTED, - INVALID_ARGUMENT, - INVALID_IMAGE_FORMAT, - INVALID_STORAGE_TYPE, - INVALID_ENGINE_TYPE, - INVALID_OPERATION, - DETECTED_NAN_IN_RESULT, - OUT_OF_MEMORY, - DEVICE_ERROR, - SYSTEM_ERROR, -}; - -} // namespace cvcore - -// WARNING: Extending base C++ namespace to cover cvcore error codes -namespace std { - -template<> -struct is_error_condition_enum : true_type -{ -}; - -template<> -struct is_error_code_enum : true_type -{ -}; - -} // namespace std - -namespace cvcore { - -std::error_condition make_error_condition(ErrorCode) noexcept; - -std::error_code make_error_code(ErrorCode) noexcept; - -// ----------------------------------------------------------------------------- - -inline void CheckCudaError(cudaError_t code, const char *file, const int line) -{ - if (code != cudaSuccess) - { - const char *errorMessage = cudaGetErrorString(code); - const std::string message = "CUDA error returned at " + std::string(file) + ":" + std::to_string(line) + - ", Error code: " + std::to_string(code) + " (" + std::string(errorMessage) + ")"; - throw std::runtime_error(message); - } -} - -inline void CheckErrorCode(std::error_code err, const char *file, const int line) -{ - const std::string message = "Error returned at " + std::string(file) + ":" + std::to_string(line) + - ", Error code: " + std::string(err.message()); - - if (err != cvcore::make_error_code(cvcore::ErrorCode::SUCCESS)) - { - throw std::runtime_error(message); - } -} - -} // namespace cvcore - -#define CHECK_ERROR(val) \ - { \ - cvcore::CheckCudaError((val), __FILE__, __LINE__); \ - } - -#define CHECK_ERROR_CODE(val) \ - { \ - cvcore::CheckErrorCode((val), __FILE__, __LINE__); \ - } - -#endif // CVCORE_CVERROR_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/CameraModel.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/CameraModel.h deleted file mode 100644 index 157acf4..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/CameraModel.h +++ /dev/null @@ -1,292 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_CAMERAMODEL_H -#define CVCORE_CAMERAMODEL_H - -#include - -#include "cv/core/Array.h" -#include "cv/core/MathTypes.h" - -namespace cvcore { - -/** - * An enum. - * Enum type for Camera Distortion type. - */ -enum class CameraDistortionType : uint8_t -{ - UNKNOWN, /**< Unknown arbitrary distortion model. */ - NONE, /**< No distortion applied. */ - Polynomial, /**< Polynomial distortion model. */ - FisheyeEquidistant, /**< Equidistant Fisheye distortion model. */ - FisheyeEquisolid, /**< Equisolid Fisheye distortion model. */ - FisheyeOrthoGraphic, /**< Orthographic Fisheye distortion model. */ - FisheyeStereographic /**< Stereographic Fisheye distortion model. 
*/ -}; - -/** - * Struct type used to store Camera Distortion model type and coefficients. - */ -struct CameraDistortionModel -{ - CameraDistortionType type; /**< Camera distortion model type. */ - union /**< Camera distortion model coefficients. */ - { - float coefficients[8]; - struct - { - float k1, k2, k3, k4, k5, k6; - float p1, p2; - }; - }; - - CameraDistortionModel() - : type(CameraDistortionType::UNKNOWN), - k1(0.0), k2(0.0), k3(0.0), k4(0.0), k5(0.0), k6(0.0), - p1(0.0), p2(0.0) {} - - /** - * Camera Distortion Model creation using array of coefficients. - * @param distortionType Camera distortion model type - * @param distortionCoefficients An array of camera distortion model coefficients - * @return Camera Distortion Model - */ - CameraDistortionModel(CameraDistortionType distortionType, std::array & distortionCoefficients) - : type(distortionType) - { - std::copy(distortionCoefficients.begin(), distortionCoefficients.end(), std::begin(coefficients)); - } - - /** - * Camera Distortion Model creation using individual coefficients. - * @param distortionType Camera distortion model type - * @param k1 Camera distortion model coefficient - k1 - * @param k2 Camera distortion model coefficient - k2 - * @param k3 Camera distortion model coefficient - k3 - * @param k4 Camera distortion model coefficient - k4 - * @param k5 Camera distortion model coefficient - k5 - * @param k6 Camera distortion model coefficient - k6 - * @param p1 Camera distortion model coefficient - p1 - * @param p2 Camera distortion model coefficient - p2 - * @return Camera Distortion Model - */ - CameraDistortionModel(CameraDistortionType distortionType, float k1, float k2, float k3, \ - float k4, float k5, float k6, float p1, float p2) - : type(distortionType) - , k1(k1) - , k2(k2) - , k3(k3) - , k4(k4) - , k5(k5) - , k6(k6) - , p1(p1) - , p2(p2) - { - - } - - /** - * Get camera distortion model type. - * @return Camera distortion model type - */ - CameraDistortionType getDistortionType() const - { - return type; - } - - /** - * Get camera distortion model coefficients. - * @return Camera distortion model coefficients array - */ - const float * getCoefficients() const - { - return &coefficients[0]; - } - - inline bool operator==(const CameraDistortionModel & other) const noexcept - { - return this->k1 == other.k1 && - this->k2 == other.k2 && - this->k3 == other.k3 && - this->k4 == other.k4 && - this->k5 == other.k5 && - this->k6 == other.k6 && - this->p1 == other.p1 && - this->p2 == other.p2; - } - - inline bool operator!=(const CameraDistortionModel & other) const noexcept - { - return !(*this == other); - } -}; - -/** - * Struct type used to store Camera Intrinsics. - */ -struct CameraIntrinsics -{ - CameraIntrinsics() = default; - - /** - * Camera Instrinsics creation with given intrinsics values - * @param fx Camera axis x focal length in pixels - * @param fy Camera axis y focal length in pixels - * @param cx Camera axis x principal point in pixels - * @param cy Camera axis y principal point in pixels - * @param s Camera slanted pixel - * @return Camera Intrinsics - */ - CameraIntrinsics(float fx, float fy, float cx, float cy, float s = 0.0) - { - m_intrinsics[0][0] = fx; - m_intrinsics[0][1] = s; - m_intrinsics[0][2] = cx; - m_intrinsics[1][0] = 0.0; - m_intrinsics[1][1] = fy; - m_intrinsics[1][2] = cy; - } - - /** - * Get camera intrinsics x focal length. - * @return Camera x focal length - */ - float fx() const - { - return m_intrinsics[0][0]; - } - - /** - * Get camera intrinsics y focal length. 
- * @return Camera y focal length - */ - float fy() const - { - return m_intrinsics[1][1]; - } - - /** - * Get camera intrinsics x principal point. - * @return Camera x principal point - */ - float cx() const - { - return m_intrinsics[0][2]; - } - - /** - * Get camera intrinsics y principal point. - * @return Camera y principal point - */ - float cy() const - { - return m_intrinsics[1][2]; - } - - /** - * Get camera intrinsics slanted pixels. - * @return Camera slanted pixels - */ - float skew() const - { - return m_intrinsics[0][1]; - } - - /** - * Get camera intrinsics 2D array. - * @return Camera intrisics array - */ - const float * getMatrix23() const - { - return &m_intrinsics[0][0]; - } - - inline bool operator==(const CameraIntrinsics & other) const noexcept - { - return m_intrinsics[0][0] == other.m_intrinsics[0][0] && - m_intrinsics[0][1] == other.m_intrinsics[0][1] && - m_intrinsics[0][2] == other.m_intrinsics[0][2] && - m_intrinsics[1][0] == other.m_intrinsics[1][0] && - m_intrinsics[1][1] == other.m_intrinsics[1][1] && - m_intrinsics[1][2] == other.m_intrinsics[1][2]; - } - - inline bool operator!=(const CameraIntrinsics & other) const noexcept - { - return !(*this == other); - } - - float m_intrinsics[2][3] {{1.0, 0.0, 0.0},{0.0, 1.0, 0.0}}; /**< Camera intrinsics 2D arrat. */ -}; - -/** - * Struct type used to store Camera Extrinsics. - */ -struct CameraExtrinsics -{ - using RawMatrixType = float[3][4]; - - CameraExtrinsics() = default; - - /** - * Camera Extrinsics creation with given extrinsics as raw 2D [3 x 4] array - * @param extrinsics Camera extrinsics as raw 2D array - * @return Camera Extrinsics - */ - explicit CameraExtrinsics(const RawMatrixType & extrinsics) - { - std::copy(&extrinsics[0][0], &extrinsics[0][0] + 3 * 4, &m_extrinsics[0][0]); - } - - inline bool operator==(const CameraExtrinsics & other) const noexcept - { - return m_extrinsics[0][0] == other.m_extrinsics[0][0] && - m_extrinsics[0][1] == other.m_extrinsics[0][1] && - m_extrinsics[0][2] == other.m_extrinsics[0][2] && - m_extrinsics[0][3] == other.m_extrinsics[0][3] && - m_extrinsics[1][0] == other.m_extrinsics[1][0] && - m_extrinsics[1][1] == other.m_extrinsics[1][1] && - m_extrinsics[1][2] == other.m_extrinsics[1][2] && - m_extrinsics[1][3] == other.m_extrinsics[1][3] && - m_extrinsics[2][0] == other.m_extrinsics[2][0] && - m_extrinsics[2][1] == other.m_extrinsics[2][1] && - m_extrinsics[2][2] == other.m_extrinsics[2][2] && - m_extrinsics[2][3] == other.m_extrinsics[2][3]; - } - - inline bool operator!=(const CameraExtrinsics & other) const noexcept - { - return !(*this == other); - } - - RawMatrixType m_extrinsics {{1.0, 0.0, 0.0, 0.0}, - {0.0, 1.0, 0.0, 0.0}, - {0.0, 0.0, 1.0, 0.0}}; -}; - -struct CameraModel -{ - CameraIntrinsics intrinsic; - CameraExtrinsics extrinsic; - CameraDistortionModel distortion; -}; - -} // namespace cvcore - -#endif // CVCORE_CAMERAMODEL_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/ComputeEngine.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/ComputeEngine.h deleted file mode 100644 index 65fe7ca..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/ComputeEngine.h +++ /dev/null @@ -1,43 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_COMPUTEENGINE_H -#define CVCORE_COMPUTEENGINE_H - -#include - -namespace cvcore { - -enum class ComputeEngine : unsigned int -{ - UNKNOWN = 0x00, // 0000_0000 - - CPU = 0x01, // 0000_0001 - PVA = 0x02, // 0000_0010 - VIC = 0x04, // 0000_0100 - NVENC = 0x08, // 0000_1000 - GPU = 0x10, // 0001_0000 - DLA = 0x20, // 0010_0000 - DLA_CORE_0 = 0x40, // 0100_0000 - DLA_CORE_1 = 0x80, // 1000_0000 - - COMPUTE_FAULT = 0xFF // 1111_1111 -}; - -} // namespace cvcore - -#endif // CVCORE_COMPUTEENGINE_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Core.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Core.h deleted file mode 100644 index 42732d9..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Core.h +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CORE_H -#define CORE_H - -namespace cvcore { - -// Enable dll imports/exports in case of windows support -#ifdef _WIN32 -#ifdef CVCORE_EXPORT_SYMBOLS // Needs to be enabled in case of compiling dll -#define CVCORE_API __declspec(dllexport) // Exports symbols when compiling the library. -#else -#define CVCORE_API __declspec(dllimport) // Imports the symbols when linked with library. -#endif -#else -#define CVCORE_API -#endif - -} // namespace cvcore -#endif // CORE_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Image.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Image.h deleted file mode 100644 index 263a699..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Image.h +++ /dev/null @@ -1,893 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_IMAGE_H -#define CVCORE_IMAGE_H - -#include -#include -#include -#include - -#include "Memory.h" -#include "Tensor.h" - -namespace cvcore { - -/** - * An enum. - * Enum type for image type. - */ -enum ImageType -{ - Y_U8, /**< 8-bit unsigned gray. */ - Y_U16, /**< 16-bit unsigned gray. */ - Y_S8, /**< 8-bit signed gray. */ - Y_S16, /**< 16-bit signed gray. */ - Y_F16, /**< half normalized gray. */ - Y_F32, /**< float normalized gray. */ - RGB_U8, /**< 8-bit RGB. */ - RGB_U16, /**< 16-bit RGB. */ - RGB_F16, /**< half RGB. */ - RGB_F32, /**< float RGB. */ - BGR_U8, /**< 8-bit BGR. */ - BGR_U16, /**< 16-bit BGR. */ - BGR_F16, /**< half BGR. */ - BGR_F32, /**< float BGR. */ - RGBA_U8, /**< 8-bit RGBA. */ - RGBA_U16, /**< 16-bit RGBA. */ - RGBA_F16, /**< half RGBA. */ - RGBA_F32, /**< float RGBA. */ - PLANAR_RGB_U8, /**< 8-bit planar RGB. */ - PLANAR_RGB_U16, /**< 16-bit planar RGB. */ - PLANAR_RGB_F16, /**< half planar RGB. */ - PLANAR_RGB_F32, /**< float planar RGB. */ - PLANAR_BGR_U8, /**< 8-bit planar BGR. */ - PLANAR_BGR_U16, /**< 16-bit planar BGR. */ - PLANAR_BGR_F16, /**< half planar BGR. */ - PLANAR_BGR_F32, /**< float planar BGR. */ - PLANAR_RGBA_U8, /**< 8-bit planar RGBA. */ - PLANAR_RGBA_U16, /**< 16-bit planar RGBA. */ - PLANAR_RGBA_F16, /**< half planar RGBA. */ - PLANAR_RGBA_F32, /**< float planar RGBA. */ - NV12, /**< 8-bit planar Y + interleaved and subsampled (1/4 Y samples) UV. */ - NV24, /**< 8-bit planar Y + interleaved UV. */ -}; - -/** - * Struct type for image preprocessing params - */ -struct ImagePreProcessingParams -{ - ImageType imgType; /**< Input Image Type. */ - float pixelMean[3]; /**< Image Mean value offset for R,G,B channels. Default is 0.0f */ - float normalization[3]; /**< Scale or normalization values for R,G,B channels. Default is 1.0/255.0f */ - float stdDev[3]; /**< Standard deviation values for R,G,B channels. Default is 1.0f */ -}; - -template -struct IsCompositeImage : std::integral_constant -{ -}; - -template -struct IsPlanarImage - : std::integral_constant -{ -}; - -template -struct IsInterleavedImage : std::integral_constant::value && !IsPlanarImage::value> -{ -}; - -/** - * Image traits that map ImageType to TensorLayout, ChannelCount and ChannelType. 
- */ -template -struct ImageTraits; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::HWC; - static constexpr ChannelCount CC = ChannelCount::C1; - static constexpr ChannelType CT = ChannelType::U8; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NHWC; - static constexpr ChannelCount CC = ChannelCount::C1; - static constexpr ChannelType CT = ChannelType::U8; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::HWC; - static constexpr ChannelCount CC = ChannelCount::C1; - static constexpr ChannelType CT = ChannelType::U16; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NHWC; - static constexpr ChannelCount CC = ChannelCount::C1; - static constexpr ChannelType CT = ChannelType::U16; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::HWC; - static constexpr ChannelCount CC = ChannelCount::C1; - static constexpr ChannelType CT = ChannelType::S8; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NHWC; - static constexpr ChannelCount CC = ChannelCount::C1; - static constexpr ChannelType CT = ChannelType::S8; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::HWC; - static constexpr ChannelCount CC = ChannelCount::C1; - static constexpr ChannelType CT = ChannelType::S16; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NHWC; - static constexpr ChannelCount CC = ChannelCount::C1; - static constexpr ChannelType CT = ChannelType::S16; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::HWC; - static constexpr ChannelCount CC = ChannelCount::C1; - static constexpr ChannelType CT = ChannelType::F32; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NHWC; - static constexpr ChannelCount CC = ChannelCount::C1; - static constexpr ChannelType CT = ChannelType::F32; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::HWC; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U8; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NHWC; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U8; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::HWC; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U16; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NHWC; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U16; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::HWC; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::F32; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NHWC; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::F32; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::HWC; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U8; -}; - -template<> 
-struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NHWC; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U8; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::HWC; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U16; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NHWC; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U16; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::HWC; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::F32; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NHWC; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::F32; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::CHW; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U8; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NCHW; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U8; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::CHW; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U16; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NCHW; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U16; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::CHW; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::F32; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NCHW; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::F32; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::CHW; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U8; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NCHW; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U8; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::CHW; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U16; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NCHW; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::U16; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::CHW; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::F32; -}; - -template<> -struct ImageTraits -{ - static constexpr TensorLayout TL = TensorLayout::NCHW; - static constexpr ChannelCount CC = ChannelCount::C3; - static constexpr ChannelType CT = ChannelType::F32; -}; - -/** - * Get the bytes of each element for a specific 
ImageType. - */ -inline size_t GetImageElementSize(const ImageType type) -{ - size_t imageElementSize; - - switch (type) - { - case ImageType::Y_U8: - case ImageType::Y_S8: - case ImageType::RGB_U8: - case ImageType::BGR_U8: - case ImageType::RGBA_U8: - case ImageType::PLANAR_RGB_U8: - case ImageType::PLANAR_BGR_U8: - case ImageType::PLANAR_RGBA_U8: - { - imageElementSize = 1; - break; - } - case ImageType::Y_U16: - case ImageType::Y_S16: - case ImageType::RGB_U16: - case ImageType::BGR_U16: - case ImageType::RGBA_U16: - case ImageType::PLANAR_RGB_U16: - case ImageType::PLANAR_BGR_U16: - case ImageType::PLANAR_RGBA_U16: - case ImageType::Y_F16: - case ImageType::RGB_F16: - case ImageType::BGR_F16: - case ImageType::RGBA_F16: - case ImageType::PLANAR_RGB_F16: - case ImageType::PLANAR_BGR_F16: - case ImageType::PLANAR_RGBA_F16: - { - imageElementSize = 2; - break; - } - case ImageType::Y_F32: - case ImageType::RGB_F32: - case ImageType::BGR_F32: - case ImageType::RGBA_F32: - case ImageType::PLANAR_RGB_F32: - case ImageType::PLANAR_BGR_F32: - case ImageType::PLANAR_RGBA_F32: - { - imageElementSize = 4; - break; - } - default: - { - imageElementSize = 0; - } - } - - return imageElementSize; -} - -/** - * Get the number of channels for a specific ImageType. - */ -inline size_t GetImageChannelCount(const ImageType type) -{ - size_t imageChannelCount; - - switch (type) - { - case ImageType::Y_U8: - case ImageType::Y_U16: - case ImageType::Y_S8: - case ImageType::Y_S16: - case ImageType::Y_F16: - case ImageType::Y_F32: - { - imageChannelCount = 1; - break; - } - case ImageType::RGB_U8: - case ImageType::RGB_U16: - case ImageType::RGB_F16: - case ImageType::RGB_F32: - case ImageType::BGR_U8: - case ImageType::BGR_U16: - case ImageType::BGR_F16: - case ImageType::BGR_F32: - case ImageType::PLANAR_RGB_U8: - case ImageType::PLANAR_RGB_U16: - case ImageType::PLANAR_RGB_F16: - case ImageType::PLANAR_RGB_F32: - case ImageType::PLANAR_BGR_U8: - case ImageType::PLANAR_BGR_U16: - case ImageType::PLANAR_BGR_F16: - case ImageType::PLANAR_BGR_F32: - { - imageChannelCount = 3; - break; - } - case ImageType::RGBA_U8: - case ImageType::RGBA_U16: - case ImageType::RGBA_F16: - case ImageType::RGBA_F32: - case ImageType::PLANAR_RGBA_U8: - case ImageType::PLANAR_RGBA_U16: - case ImageType::PLANAR_RGBA_F16: - case ImageType::PLANAR_RGBA_F32: - { - imageChannelCount = 4; - break; - } - default: - { - imageChannelCount = 0; - } - } - - return imageChannelCount; -}; - -template -class Image -{ -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - 
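// [Editorial sketch - not part of the files removed by this patch] Each
// Image<T> specialization above simply inherits the constructors of the
// Tensor type selected for that ImageType, so images are created exactly
// like tensors. A rough usage sketch, assuming the (width, height, isCPU)
// owning constructor and the (width, height, dataPtr, isCPU) non-owning
// constructor that the NV12/NV24 wrappers later in this header also use;
// the pointer and buffer names here are hypothetical.

#include "cv/core/Image.h"

static void ImageUsageSketch(float *existingHostBuffer)
{
    // Owning 8-bit interleaved RGB image allocated on the GPU (isCPU = false).
    cvcore::Image<cvcore::ImageType::RGB_U8> colorDevice(1920, 1080, /*isCPU=*/false);

    // Non-owning float grayscale view over caller-managed CPU memory; the
    // element pointer type follows the channel type (float for Y_F32).
    cvcore::Image<cvcore::ImageType::Y_F32> grayHost(640, 480, existingHostBuffer, /*isCPU=*/true);

    (void)colorDevice;
    (void)grayHost;
}

// The ImageTraits table above is what ties each ImageType to its
// TensorLayout / ChannelCount / ChannelType combination for these wrappers.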
-template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image : public Tensor -{ - using Tensor::Tensor; -}; - -template<> -class Image -{ -public: - Image(std::size_t width, std::size_t height, bool isCPU = true) - : m_data(std::make_tuple(Y(width, height, isCPU), UV(width / 2, height / 2, isCPU))) - { - assert(width % 2 == 0 && height % 2 == 0); - } - - Image(std::size_t width, std::size_t height, std::uint8_t *dataPtrLuma, std::uint8_t *dataPtrChroma, - bool isCPU = true) - : m_data(std::make_tuple(Y(width, height, dataPtrLuma, isCPU), UV(width / 2, height / 2, dataPtrChroma, isCPU))) - { - assert(width % 2 == 0 && height % 2 == 0); - } - - Image(std::size_t width, std::size_t height, std::size_t rowPitchLuma, std::size_t rowPitchChroma, - std::uint8_t *dataPtrLuma, std::uint8_t *dataPtrChroma, bool isCPU = true) - : m_data(std::make_tuple(Y(width, height, rowPitchLuma, dataPtrLuma, isCPU), - UV(width / 2, height / 2, rowPitchChroma, dataPtrChroma, isCPU))) - { - assert(width % 2 == 0 && height % 2 == 0); - } - - std::size_t getLumaWidth() const - { - return std::get<0>(m_data).getWidth(); - } - - std::size_t getLumaHeight() const - { - return std::get<0>(m_data).getHeight(); - } - - std::size_t getChromaWidth() const - { - return std::get<1>(m_data).getWidth(); - } - - std::size_t getChromaHeight() const - { - return std::get<1>(m_data).getHeight(); - } - - std::size_t getLumaStride(TensorDimension dim) const - { - return std::get<0>(m_data).getStride(dim); - } - - std::size_t getChromaStride(TensorDimension dim) const - { - return std::get<1>(m_data).getStride(dim); - } - - std::uint8_t *getLumaData() - { - return std::get<0>(m_data).getData(); - } - - std::uint8_t *getChromaData() - { - return std::get<1>(m_data).getData(); - } - - const std::uint8_t *getLumaData() const - { - return std::get<0>(m_data).getData(); - } - - std::size_t getLumaDataSize() const - { - return std::get<0>(m_data).getDataSize(); - } - - const std::uint8_t *getChromaData() const - { - return std::get<1>(m_data).getData(); - } - - std::size_t getChromaDataSize() const - { - return std::get<1>(m_data).getDataSize(); - } - - bool isCPU() const - { - return std::get<0>(m_data).isCPU(); - } - - friend void Copy(Image &dst, const Image &src, cudaStream_t stream); - -private: - using Y = Tensor; - using UV = Tensor; - - std::tuple m_data; -}; - -template<> -class Image -{ -public: - Image(std::size_t width, std::size_t height, bool isCPU = true) - : 
m_data(std::make_tuple(Y(width, height, isCPU), UV(width, height, isCPU))) - { - } - - Image(std::size_t width, std::size_t height, std::uint8_t *dataPtrLuma, std::uint8_t *dataPtrChroma, - bool isCPU = true) - : m_data(std::make_tuple(Y(width, height, dataPtrLuma, isCPU), UV(width, height, dataPtrChroma, isCPU))) - { - } - - Image(std::size_t width, std::size_t height, std::size_t rowPitchLuma, std::size_t rowPitchChroma, - std::uint8_t *dataPtrLuma, std::uint8_t *dataPtrChroma, bool isCPU = true) - : m_data(std::make_tuple(Y(width, height, rowPitchLuma, dataPtrLuma, isCPU), - UV(width, height, rowPitchChroma, dataPtrChroma, isCPU))) - { - } - - std::size_t getLumaWidth() const - { - return std::get<0>(m_data).getWidth(); - } - - std::size_t getLumaHeight() const - { - return std::get<0>(m_data).getHeight(); - } - - std::size_t getChromaWidth() const - { - return std::get<1>(m_data).getWidth(); - } - - std::size_t getChromaHeight() const - { - return std::get<1>(m_data).getHeight(); - } - - std::size_t getLumaStride(TensorDimension dim) const - { - return std::get<0>(m_data).getStride(dim); - } - - std::size_t getChromaStride(TensorDimension dim) const - { - return std::get<1>(m_data).getStride(dim); - } - - std::uint8_t *getLumaData() - { - return std::get<0>(m_data).getData(); - } - - const std::uint8_t *getLumaData() const - { - return std::get<0>(m_data).getData(); - } - - std::size_t getLumaDataSize() const - { - return std::get<0>(m_data).getDataSize(); - } - - std::uint8_t *getChromaData() - { - return std::get<1>(m_data).getData(); - } - - const std::uint8_t *getChromaData() const - { - return std::get<1>(m_data).getData(); - } - - std::size_t getChromaDataSize() const - { - return std::get<1>(m_data).getDataSize(); - } - - bool isCPU() const - { - return std::get<0>(m_data).isCPU(); - } - - friend void Copy(Image &dst, const Image &src, cudaStream_t stream); - -private: - using Y = Tensor; - using UV = Tensor; - - std::tuple m_data; -}; - -void inline Copy(Image &dst, const Image &src, cudaStream_t stream = 0) -{ - Copy(std::get<0>(dst.m_data), std::get<0>(src.m_data), stream); - Copy(std::get<1>(dst.m_data), std::get<1>(src.m_data), stream); -} - -void inline Copy(Image &dst, const Image &src, cudaStream_t stream = 0) -{ - Copy(std::get<0>(dst.m_data), std::get<0>(src.m_data), stream); - Copy(std::get<1>(dst.m_data), std::get<1>(src.m_data), stream); -} - -} // namespace cvcore - -#endif // CVCORE_IMAGE_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Instrumentation.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Instrumentation.h deleted file mode 100644 index 6324b8d..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Instrumentation.h +++ /dev/null @@ -1,65 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_INSTRUMENTATION_H -#define CVCORE_INSTRUMENTATION_H - -#include - -namespace cvcore { namespace profiler { - -/** - * A enum class used to find out the type of profiler output required - */ -enum class ProfilerJsonOutputType : uint32_t -{ - JSON_OFF, /**< print the aggregate values of each timer in pretty print format */ - JSON_AGGREGATE, /**< print the aggregate values of each timer in JSON format - along with the pretty print format. Pretty print format - gets printed on the terminal */ - JSON_SEPARATE /**< print all the elapsed times for all timers along with the - aggregate values from JSON_AGGREGATE option */ -}; - -/** -* Flush call to print the timer values in a file input -* @param jsonHelperType used to find out the type of profiler output required -* @return filename used to write the timer values -*/ -void flush(const std::string& filename, ProfilerJsonOutputType jsonHelperType); - -/** -* Flush call to print the timer values in a output stream -* @param jsonHelperType used to find out the type of profiler output required -* @return output stream used to write the timer values -*/ -void flush(std::ostream& output, ProfilerJsonOutputType jsonHelperType); - -/** -* Flush call to print the timer values on the terminal -* @param jsonHelperType used to find out the type of profiler output required -*/ -void flush(ProfilerJsonOutputType jsonHelperType); - - -/** -* Clear all the profile timers -*/ -void clear(); - -}} -#endif diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/MathTypes.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/MathTypes.h deleted file mode 100644 index fd9db1a..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/MathTypes.h +++ /dev/null @@ -1,234 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_Math_H -#define CVCORE_Math_H - -#include -#include -#include -#include - -#include "Tensor.h" - -namespace cvcore { - -/** Matrix types using Tensor backend */ -using Matrixf = Tensor; -using Matrixd = Tensor; - -/** - * A struct. - * Structure used to store Vector2 Values. - */ -template -struct Vector2 -{ - T x; /**< point x coordinate. */ - T y; /**< point y coordinate. */ - - inline T &operator[](size_t i) - { - if (i == 0) - { - return x; - } - else if (i == 1) - { - return y; - } - else - { - throw std::out_of_range("cvcore::Vector2 ==> Requested index is out of bounds"); - } - } -}; - -/** - * A struct. - * Structure used to store Vector3 Values. - */ -template -struct Vector3 -{ - T x; /**< point x coordinate. */ - T y; /**< point y coordinate. */ - T z; /**< point z coordinate. 
*/ - - inline T &operator[](size_t i) - { - if (i == 0) - { - return x; - } - else if (i == 1) - { - return y; - } - else if (i == 2) - { - return z; - } - else - { - throw std::out_of_range("cvcore::Vector3 ==> Requested index is out of bounds"); - } - } -}; - -using Vector2i = Vector2; -using Vector3i = Vector3; - -using Vector2f = Vector2; -using Vector3f = Vector3; - -using Vector2d = Vector2; -using Vector3d = Vector3; - -/** - * A struct - * Structure used to store AxisAngle Rotation parameters. - */ -struct AxisAngleRotation -{ - double angle; /** Counterclockwise rotation angle [0, 2PI]. */ - Vector3d axis; /** 3d axis of rotation. */ - - AxisAngleRotation() - : angle(0.0) - , axis{0, 0, 0} - { - } - - AxisAngleRotation(double angleinput, Vector3d axisinput) - : angle(angleinput) - , axis(axisinput) - { - } -}; - -/** - * A struct. - * Structure used to store quaternion rotation representation. - * A rotation of unit vector u with rotation theta can be represented in quaternion as: - * q={cos(theta/2)+ i(u*sin(theta/2))} -*/ -struct Quaternion -{ - double qx, qy, qz; /** Axis or imaginary component of the quaternion representation. */ - double qw; /** Angle or real component of the quaternion representation. */ - - Quaternion() - : qx(0.0) - , qy(0.0) - , qz(0.0) - , qw(0.0) - { - } - - Quaternion(double qxinput, double qyinput, double qzinput, double qwinput) - : qx(qxinput) - , qy(qyinput) - , qz(qzinput) - , qw(qwinput) - { - } -}; - -/** - * Convert rotation matrix to rotation vector. - * @param rotMatrix Rotation matrix of 3x3 values. - * @return 3D Rotation vector {theta * xaxis, theta * yaxis, theta * zaxis} - * where theta is the angle of rotation in radians - */ -Vector3d RotationMatrixToRotationVector(const std::vector &rotMatrix); - -/** - * Convert rotation matrix to axis angle representation. - * @param rotMatrix Rotation matrix of 3x3 values. - * @return Axis angle rotation - */ -AxisAngleRotation RotationMatrixToAxisAngleRotation(const std::vector &rotMatrix); - -/** - * Convert axis angle representation to rotation matrix. - * @param axisangle Axis angle rotation. - * @return Rotation matrix of 3x3 values. - */ -std::vector AxisAngleToRotationMatrix(const AxisAngleRotation &axisangle); - -/** - * Convert axis angle representation to 3d rotation vector. - * Rotation vector is {theta * xaxis, theta * yaxis, theta * zaxis} - * where theta is the angle of rotation in radians. - * @param axisangle Axis angle rotation. - * @return 3D Rotation Vector - */ -Vector3d AxisAngleRotationToRotationVector(const AxisAngleRotation &axisangle); - -/** - * Convert rotation vector to axis angle representation. - * @param rotVector 3D rotation vector. - * @return Axis angle rotation. - */ -AxisAngleRotation RotationVectorToAxisAngleRotation(const Vector3d &rotVector); - -/** - * Convert axis angle representation to quaternion. - * @param axisangle Axis angle representation. - * @return Quaternion rotation. - */ -Quaternion AxisAngleRotationToQuaternion(const AxisAngleRotation &axisangle); - -/** - * Convert quaternion rotation to axis angle rotation. - * @param qrotation Quaternion rotation representation. - * @return Axis angle rotation. - */ -AxisAngleRotation QuaternionToAxisAngleRotation(const Quaternion &qrotation); - -/** - * Convert quaternion rotation to rotation matrix. - * @param qrotation Quaternion rotation representation. - * @return Rotation matrix. - */ -std::vector QuaternionToRotationMatrix(const Quaternion &qrotation); - -/** - * Convert rotation matrix to Quaternion. 
- * @param rotMatrix Rotation matrix - * @return Quaternion rotation. - */ -Quaternion RotationMatrixToQuaternion(const std::vector &rotMatrix); - -/** - * A struct. - * Structure used to store Pose3D parameters. - */ -template -struct Pose3 -{ - AxisAngleRotation rotation; /**Rotation expressed in axis angle notation.*/ - Vector3 translation; /*Translation expressed as x,y,z coordinates.*/ -}; - -using Pose3d = Pose3; -using Pose3f = Pose3; - -} // namespace cvcore - -#endif // CVCORE_Math_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Memory.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Memory.h deleted file mode 100644 index 7d3d113..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Memory.h +++ /dev/null @@ -1,135 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_MEMORY_H -#define CVCORE_MEMORY_H - -#include - -#include "Tensor.h" - -namespace cvcore { - -/** - * Implementation of tensor copy. - * @param dst destination TensorBase. - * @param src source TensorBase. - * @param stream cuda stream. - */ -void TensorBaseCopy(TensorBase &dst, const TensorBase &src, cudaStream_t stream = 0); - -/** - * Implementation of tensor copy for 2D pitch linear tensors. - * @param dst destination TensorBase. - * @param src source TensorBase. - * @param dstPitch pitch of destination Tensor in bytes. - * @param srcPitch pitch of source Tensor in bytes. - * @param widthInBytes width in bytes. - * @param height height of tensor. - * @param stream cuda stream. - */ -void TensorBaseCopy2D(TensorBase &dst, const TensorBase &src, int dstPitch, int srcPitch, int widthInBytes, int height, - cudaStream_t stream = 0); - -/** - * Memory copy function between two non HWC/CHW/NHWC/NCHW Tensors. - * @tparam TL TensorLayout type. - * @tparam CC Channel Count. - * @tparam CT ChannelType. - * @param dst destination Tensor. - * @param src source Tensor which copy from. - * @param stream cuda stream. - */ -template::type * = nullptr> -void Copy(Tensor &dst, const Tensor &src, cudaStream_t stream = 0) -{ - TensorBaseCopy(dst, src, stream); -} - -/** - * Memory copy function between two HWC Tensors. - * @tparam TL TensorLayout type. - * @tparam CC Channel Count. - * @tparam CT ChannelType. - * @param dst destination Tensor. - * @param src source Tensor which copy from. - * @param stream cuda stream. - */ -template::type * = nullptr> -void Copy(Tensor &dst, const Tensor &src, cudaStream_t stream = 0) -{ - TensorBaseCopy2D(dst, src, dst.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - src.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - dst.getWidth() * dst.getChannelCount() * GetChannelSize(CT), src.getHeight(), stream); -} - -/** - * Memory copy function between two NHWC Tensors. - * @tparam TL TensorLayout type. - * @tparam CC Channel Count. 
- * @tparam CT ChannelType. - * @param dst destination Tensor. - * @param src source Tensor which copy from. - * @param stream cuda stream. - */ -template::type * = nullptr> -void Copy(Tensor &dst, const Tensor &src, cudaStream_t stream = 0) -{ - TensorBaseCopy2D(dst, src, dst.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - src.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - dst.getWidth() * dst.getChannelCount() * GetChannelSize(CT), src.getDepth() * src.getHeight(), - stream); -} - -/** - * Memory copy function between two CHW Tensors. - * @tparam TL TensorLayout type. - * @tparam CC Channel Count. - * @tparam CT ChannelType. - * @param dst destination Tensor. - * @param src source Tensor which copy from. - * @param stream cuda stream. - */ -template::type * = nullptr> -void Copy(Tensor &dst, const Tensor &src, cudaStream_t stream = 0) -{ - TensorBaseCopy2D(dst, src, dst.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - src.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), dst.getWidth() * GetChannelSize(CT), - src.getChannelCount() * src.getHeight(), stream); -} - -/** - * Memory copy function between two NCHW Tensors. - * @tparam TL TensorLayout type. - * @tparam CC Channel Count. - * @tparam CT ChannelType. - * @param dst destination Tensor. - * @param src source Tensor which copy from. - * @param stream cuda stream. - */ -template::type * = nullptr> -void Copy(Tensor &dst, const Tensor &src, cudaStream_t stream = 0) -{ - TensorBaseCopy2D(dst, src, dst.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - src.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), dst.getWidth() * GetChannelSize(CT), - src.getDepth() * src.getChannelCount() * src.getHeight(), stream); -} - -} // namespace cvcore - -#endif // CVCORE_MEMORY_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Model.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Model.h deleted file mode 100644 index 4a14945..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Model.h +++ /dev/null @@ -1,50 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_MODEL_H -#define CVCORE_MODEL_H - -#include - -#include "Image.h" - -namespace cvcore { - -/** - * Struct to describe input type required by the model - */ -struct ModelInputParams -{ - size_t maxBatchSize; /**< maxbatchSize supported by network*/ - size_t inputLayerWidth; /**< Input layer width */ - size_t inputLayerHeight; /**< Input layer Height */ - ImageType modelInputType; /**< Input Layout type */ -}; - -/** - * Struct to describe the model - */ -struct ModelInferenceParams -{ - std::string engineFilePath; /**< Engine file path. */ - std::vector inputLayers; /**< names of input layers. */ - std::vector outputLayers; /**< names of output layers. 
*/ -}; - -} // namespace cvcore - -#endif // CVCORE_MODEL_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/ProfileUtils.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/ProfileUtils.h deleted file mode 100644 index 0a4e9a5..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/ProfileUtils.h +++ /dev/null @@ -1,40 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_PROFILE_UTILS_H -#define CVCORE_PROFILE_UTILS_H - -#include - -namespace cvcore { - -/** - * Export one profiling item to specified json file. - * @param outputPath output json file path. - * @param taskName item name showing in the output json file. - * @param tMin minimum running time in milliseconds. - * @param tMax maximum running time in milliseconds. - * @param tAvg average running time in milliseconds. - * @param isCPU whether CPU or GPU time. - * @param iterations number of iterations. - */ -void ExportToJson(const std::string outputPath, const std::string taskName, float tMin, float tMax, float tAvg, - bool isCPU, int iterations = 100); - -} // namespace cvcore - -#endif // CVCORE_PROFILE_UTILS_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Tensor.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Tensor.h deleted file mode 100644 index b06e531..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Tensor.h +++ /dev/null @@ -1,1189 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_TENSOR_H -#define CVCORE_TENSOR_H - -#include -#include -#include -#include - -namespace cvcore { - -// there is no CUDA dependency at the data level, so we map half type to uint16_t for now -using half = std::uint16_t; - -/** - * An enum. - * Enum type for tensor layout type. - */ -enum TensorLayout -{ - LC, /**< length, channel (channel interleaved). */ - CL, /**< channel, length (channel planar). */ - HWC, /**< height, width, channel (channel interleaved). */ - CHW, /**< channel, height, width (channel planar). */ - DHWC, /**< depth, height, width, channel (channel interleaved). */ - NHWC = DHWC, /**< alias for DHWC. */ - DCHW, /**< depth, channel, height, width (channel planar). 
*/ - NCHW = DCHW, /**< alias for DCHW. */ - CDHW, /**< channel, depth, height, width (channel planar). */ -}; - -/** - * An enum. - * Enum type for tensor channel count. - */ -enum ChannelCount -{ - C1, /**< 1 channels. */ - C2, /**< 2 channels. */ - C3, /**< 3 channels. */ - C4, /**< 4 channels. */ - CX, /**< varying number of channels. */ -}; - -/** - * An enum. - * Enum type for channel type. - */ -enum ChannelType -{ - U8, /**< uint8_t. */ - U16, /**< uint16_t. */ - S8, /**< int8_t. */ - S16, /**< int16_t. */ - F16, /**< cvcore::half. */ - F32, /**< float. */ - F64, /**< double. */ -}; - -/** - * An enum. - * Enum type for dimension type. - */ -enum class TensorDimension -{ - LENGTH, /**< length dimension. */ - HEIGHT, /**< height dimension. */ - WIDTH, /**< width dimension. */ - CHANNEL, /**< channel dimension. */ - DEPTH, /**< depth dimension. */ -}; - -/** - * Function to get name of a TensorLayout value as string. - * @param TL the TensorLayout value. - * @return string name of TL. - */ -std::string GetTensorLayoutAsString(TensorLayout TL); - -/** - * Function to get name of a ChannelCount value as string. - * @param CC the ChannelCount value. - * @return string name of CC. - */ -std::string GetChannelCountAsString(ChannelCount CC); - -/** - * Function to get name of a ChannelType value as string. - * @param CT the ChannelType value. - * @return string name of CT. - */ -std::string GetChannelTypeAsString(ChannelType CT); - -/** - * Function to get name of a Memory type used. - * @param bool isCPU of Tensor as input - * @return string name of Memory type. - */ -std::string GetMemoryTypeAsString(bool isCPU); - -/** - * Function to get element size (in bytes) of a ChannelType. - * @param CT the ChannelType value. - * @return size in bytes. - */ -std::size_t GetChannelSize(ChannelType CT); - -/** - * Implementation of TensorBase class. - */ -class TensorBase -{ -public: - /** - * Struct for storing dimension data. - */ - struct DimData - { - std::size_t size; /**< size of each dimension. */ - std::size_t stride; /**< stride of each dimension. */ - }; - - /** - * Constructor of a non-owning tensor. - * @param type ChannelType of the Tensor. - * @param dimData pointer to the DimData array. - * @param dimCount number of dimensions. - * @param dataPtr raw pointer to the source data array. - * @param isCPU whether to allocate tensor on CPU or GPU. - */ - TensorBase(ChannelType type, const DimData *dimData, int dimCount, void *dataPtr, bool isCPU); - - /** - * Constructor of a non-owning tensor. - * @param type ChannelType of the Tensor. - * @param dimData initializer_list of DimData. - * @param dataPtr raw pointer to the source data array. - * @param isCPU whether to allocate tensor on CPU or GPU. - */ - TensorBase(ChannelType type, std::initializer_list dimData, void *dataPtr, bool isCPU); - - /** - * Constructor of a memory-owning tensor. - * @param type ChannelType of the Tensor. - * @param dimData pointer to the DimData array. - * @param dimCount number of dimensions. - * @param isCPU whether to allocate tensor on CPU or GPU. - */ - TensorBase(ChannelType type, const DimData *dimData, int dimCount, bool isCPU); - - /** - * Constructor of a memory-owning tensor. - * @param type ChannelType of the Tensor. - * @param dimData initializer_list of DimData. - * @param isCPU whether to allocate tensor on CPU or GPU. - */ - TensorBase(ChannelType type, std::initializer_list dimData, bool isCPU); - - /** - * Destructor of Tensor. - */ - ~TensorBase(); - - /** - * TensorBase is non-copyable. 
- */ - TensorBase(const TensorBase &) = delete; - - /** - * TensorBase is non-copyable. - */ - TensorBase &operator=(const TensorBase &) = delete; - - /** - * Move Constructor of TensorBase. - */ - TensorBase(TensorBase &&); - - /** - * Move operator of TensorBase. - */ - TensorBase &operator=(TensorBase &&); - - /** - * Get the dimension count of TensorBase. - * @return number of dimensions. - */ - int getDimCount() const; - - /** - * Get the size of given dimension. - * @param dimIdx dimension index. - * @return size of the specified dimension. - */ - std::size_t getSize(int dimIdx) const; - - /** - * Get the stride of given dimension. - * @param dimIdx dimension index. - * @return stride of the specified dimension. - */ - std::size_t getStride(int dimIdx) const; - - /** - * Get the ChannelType of the Tensor. - * @return ChannelType of the Tensor. - */ - ChannelType getType() const; - - /** - * Get the raw data pointer to the Tensor. - * @return void data pointer to the Tensor. - */ - void *getData() const; - - /** - * Get the total size of the Tensor in bytes. - * @return total bytes of the Tensor. - */ - std::size_t getDataSize() const; - - /** - * Get the flag whether the Tensor is allocated in CPU or GPU. - * @return whether the Tensor is allocated in CPU. - */ - bool isCPU() const; - - /** - * Get the flag whether the Tensor owns the data. - * @return whether the Tensor owns data in memory. - */ - bool isOwning() const; - -protected: - TensorBase(); - -private: - static constexpr int kMinDimCount = 2; - static constexpr int kMaxDimCount = 4; - - void *m_data; - - int m_dimCount; - DimData m_dimData[kMaxDimCount]; - - ChannelType m_type; - bool m_isOwning; - bool m_isCPU; -}; - -namespace detail { - -template -struct DimToIndex2D -{ - static_assert(TL == LC || TL == CL, "unsupported variant!"); - static constexpr int kLength = TL == LC ? 0 : 1; - static constexpr int kChannel = TL == LC ? 1 : 0; -}; - -template -struct DimToIndex3D -{ - static_assert(TL == HWC || TL == CHW, "unsupported variant!"); - static constexpr int kWidth = TL == HWC ? 1 : 2; - static constexpr int kHeight = TL == HWC ? 0 : 1; - static constexpr int kChannel = TL == HWC ? 2 : 0; -}; - -template -struct DimToIndex4D -{ - static_assert(TL == DHWC || TL == DCHW || TL == CDHW, "unsupported variant!"); - static constexpr int kWidth = TL == DHWC ? 2 : (TL == DCHW ? 3 : 3); - static constexpr int kHeight = TL == DHWC ? 1 : (TL == DCHW ? 2 : 2); - static constexpr int kDepth = TL == DHWC ? 0 : (TL == DCHW ? 0 : 1); - static constexpr int kChannel = TL == DHWC ? 3 : (TL == DCHW ? 
1 : 0); -}; - -template -struct LayoutToIndex -{ -}; - -template -struct LayoutToIndex::type> : public DimToIndex2D -{ - static constexpr int kDimCount = 2; -}; - -template -struct LayoutToIndex::type> : public DimToIndex3D -{ - static constexpr int kDimCount = 3; -}; - -template -struct LayoutToIndex::type> - : public DimToIndex4D -{ - static constexpr int kDimCount = 4; -}; - -template -struct ChannelTypeToNative -{ -}; - -template<> -struct ChannelTypeToNative -{ - using Type = std::uint8_t; -}; - -template<> -struct ChannelTypeToNative -{ - using Type = std::uint16_t; -}; - -template<> -struct ChannelTypeToNative -{ - using Type = std::int8_t; -}; - -template<> -struct ChannelTypeToNative -{ - using Type = std::int16_t; -}; - -template<> -struct ChannelTypeToNative -{ - using Type = float; -}; - -template<> -struct ChannelTypeToNative -{ - using Type = cvcore::half; -}; - -template<> -struct ChannelTypeToNative -{ - using Type = double; -}; - -template -constexpr std::size_t ChannelToCount() -{ - switch (CC) - { - case C1: - return 1; - case C2: - return 2; - case C3: - return 3; - case C4: - return 4; - } - return 0; // this is safe as this function will never be called for dynamic channel counts -} - -/** - * Implementation of 2D tensors. - * @tparam TL tensor layout type. - * @tparam CT channel type. - */ -template -class Tensor2D : public TensorBase -{ - using DataType = typename ChannelTypeToNative::Type; - -public: - /** - * Default Constructor. - */ - Tensor2D() = default; - - /** - * Constructor of a memory-owning 2D tensor. - * @param dimData initializer_list of DimData. - * @param isCPU whether to allocate tensor on CPU or GPU. - */ - Tensor2D(std::initializer_list dimData, bool isCPU) - : TensorBase(CT, dimData, isCPU) - { - } - - /** - * Constructor of a non-owning 2D tensor. - * @param dimData initializer_list of DimData. - * @param dataPtr raw pointer to the source data array. - * @param isCPU whether to allocate tensor on CPU or GPU. - */ - Tensor2D(std::initializer_list dimData, DataType *dataPtr, bool isCPU) - : TensorBase(CT, dimData, dataPtr, isCPU) - { - } - - /** - * Get the length of the 2D tensor. - * @return length of the 2D tensor. - */ - std::size_t getLength() const - { - return getSize(DimToIndex2D::kLength); - } - - /** - * Get the channel count of the 2D tensor. - * @return channel count of the 2D tensor. - */ - std::size_t getChannelCount() const - { - return getSize(DimToIndex2D::kChannel); - } - - /** - * Expose base getStride() function. - */ - using TensorBase::getStride; - - /** - * Get the stride of the 2D tensor. - * @param dim tensor dimension. - * @return tensor stride of the given dimension. - */ - std::size_t getStride(TensorDimension dim) const - { - switch (dim) - { - case TensorDimension::LENGTH: - return getStride(DimToIndex2D::kLength); - case TensorDimension::CHANNEL: - return getStride(DimToIndex2D::kChannel); - default: - throw std::out_of_range("cvcore::Tensor2D::getStride ==> Requested TensorDimension is out of bounds"); - } - } - - /** - * Get the raw data pointer to the 2D tensor. - * @return data pointer to the 2D tensor. - */ - DataType *getData() - { - return reinterpret_cast(TensorBase::getData()); - } - - /** - * Get the const raw data pointer to the 2D tensor. - * @return const data pointer to the 2D tensor. - */ - const DataType *getData() const - { - return reinterpret_cast(TensorBase::getData()); - } -}; - -/** - * Implementation of 3D tensors. - * @tparam TL tensor layout type. - * @tparam CT channel type. 
- */ -template -class Tensor3D : public TensorBase -{ - using DataType = typename ChannelTypeToNative::Type; - -public: - /** - * Default Constructor. - */ - Tensor3D() = default; - - /** - * Constructor of a memory-owning 3D tensor. - * @param dimData initializer_list of DimData. - * @param isCPU whether to allocate tensor on CPU or GPU. - */ - Tensor3D(std::initializer_list dimData, bool isCPU) - : TensorBase(CT, dimData, isCPU) - { - } - - /** - * Constructor of a non-owning 3D tensor. - * @param dimData initializer_list of DimData. - * @param dataPtr raw pointer to the source data array. - * @param isCPU whether to allocate tensor on CPU or GPU. - */ - Tensor3D(std::initializer_list dimData, DataType *dataPtr, bool isCPU) - : TensorBase(CT, dimData, dataPtr, isCPU) - { - } - - /** - * Get the width of the 3D tensor. - * @return width of the 3D tensor. - */ - std::size_t getWidth() const - { - return getSize(DimToIndex3D::kWidth); - } - - /** - * Get the height of the 3D tensor. - * @return height of the 3D tensor. - */ - std::size_t getHeight() const - { - return getSize(DimToIndex3D::kHeight); - } - - /** - * Get the channel count of the 3D tensor. - * @return channel count of the 3D tensor. - */ - std::size_t getChannelCount() const - { - return getSize(DimToIndex3D::kChannel); - } - - /** - * Expose base getStride() function. - */ - using TensorBase::getStride; - - /** - * Get the stride of the 3D tensor. - * @param dim tensor dimension. - * @return tensor stride of the given dimension. - */ - std::size_t getStride(TensorDimension dim) const - { - switch (dim) - { - case TensorDimension::HEIGHT: - return getStride(DimToIndex3D::kHeight); - case TensorDimension::WIDTH: - return getStride(DimToIndex3D::kWidth); - case TensorDimension::CHANNEL: - return getStride(DimToIndex3D::kChannel); - default: - throw std::out_of_range("cvcore::Tensor3D::getStride ==> Requested TensorDimension is out of bounds"); - } - } - - /** - * Get the raw data pointer to the 3D tensor. - * @return data pointer to the 3D tensor. - */ - DataType *getData() - { - return reinterpret_cast(TensorBase::getData()); - } - - /** - * Get the const raw data pointer to the 3D tensor. - * @return const data pointer to the 3D tensor. - */ - const DataType *getData() const - { - return reinterpret_cast(TensorBase::getData()); - } -}; - -/** - * Implementation of 4D tensors. - * @tparam TL tensor layout type. - * @tparam CT channel type. - */ -template -class Tensor4D : public TensorBase -{ - using DataType = typename ChannelTypeToNative::Type; - -public: - /** - * Default Constructor. - */ - Tensor4D() = default; - - /** - * Constructor of a memory-owning 4D tensor. - * @param dimData initializer_list of DimData. - * @param isCPU whether to allocate tensor on CPU or GPU. - */ - Tensor4D(std::initializer_list dimData, bool isCPU) - : TensorBase(CT, dimData, isCPU) - { - } - - /** - * Constructor of a non-owning 4D tensor. - * @param dimData initializer_list of DimData. - * @param dataPtr raw pointer to the source data array. - * @param isCPU whether to allocate tensor on CPU or GPU. - */ - Tensor4D(std::initializer_list dimData, DataType *dataPtr, bool isCPU) - : TensorBase(CT, dimData, dataPtr, isCPU) - { - } - - /** - * Get the width of the 4D tensor. - * @return width of the 4D tensor. - */ - std::size_t getWidth() const - { - return getSize(DimToIndex4D::kWidth); - } - - /** - * Get the height of the 4D tensor. - * @return height of the 4D tensor. 
- */ - std::size_t getHeight() const - { - return getSize(DimToIndex4D::kHeight); - } - - /** - * Get the depth of the 4D tensor. - * @return depth of the 4D tensor. - */ - std::size_t getDepth() const - { - return getSize(DimToIndex4D::kDepth); - } - - /** - * Get the channel count of the 4D tensor. - * @return channel count of the 4D tensor. - */ - std::size_t getChannelCount() const - { - return getSize(DimToIndex4D::kChannel); - } - - /** - * Expose base getStride() function. - */ - using TensorBase::getStride; - - /** - * Get the stride of the 4D tensor. - * @param dim tensor dimension. - * @return tensor stride of the given dimension. - */ - std::size_t getStride(TensorDimension dim) const - { - switch (dim) - { - case TensorDimension::HEIGHT: - return getStride(DimToIndex4D::kHeight); - case TensorDimension::WIDTH: - return getStride(DimToIndex4D::kWidth); - case TensorDimension::CHANNEL: - return getStride(DimToIndex4D::kChannel); - case TensorDimension::DEPTH: - return getStride(DimToIndex4D::kDepth); - default: - throw std::out_of_range("cvcore::Tensor4D::getStride ==> Requested TensorDimension is out of bounds"); - } - } - - /** - * Get the raw data pointer to the 4D tensor. - * @return data pointer to the 4D tensor. - */ - DataType *getData() - { - return reinterpret_cast(TensorBase::getData()); - } - - /** - * Get the const raw data pointer to the 4D tensor. - * @return const data pointer to the 4D tensor. - */ - const DataType *getData() const - { - return reinterpret_cast(TensorBase::getData()); - } -}; - -} // namespace detail - -template -class Tensor; - -// 2D Tensors - -/** - * 2D LC tensors. - * @tparam CC channel count. - * @tparam CT channel type. - */ -template -class Tensor : public detail::Tensor2D -{ -public: - using DataType = typename detail::ChannelTypeToNative::Type; - - static constexpr ChannelCount kChannelCount = CC; - - Tensor() = default; - - template::type> - Tensor(std::size_t length, bool isCPU = true) - : detail::Tensor2D({{length, detail::ChannelToCount()}, {detail::ChannelToCount(), 1}}, isCPU) - { - } - - template::type> - Tensor(std::size_t length, std::size_t channelCount, bool isCPU = true) - : detail::Tensor2D({{length, channelCount}, {channelCount, 1}}, isCPU) - { - } - - template::type> - Tensor(std::size_t length, DataType *dataPtr, bool isCPU = true) - : detail::Tensor2D({{length, detail::ChannelToCount()}, {detail::ChannelToCount(), 1}}, dataPtr, - isCPU) - { - } - - template::type> - Tensor(std::size_t length, std::size_t channelCount, DataType *dataPtr, bool isCPU = true) - : detail::Tensor2D({{length, channelCount}, {channelCount, 1}}, dataPtr, isCPU) - { - } -}; - -/** - * 2D CL tensors. - * @tparam CC channel count. - * @tparam CT channel type. 
- */ -template -class Tensor : public detail::Tensor2D -{ -public: - using DataType = typename detail::ChannelTypeToNative::Type; - - static constexpr ChannelCount kChannelCount = CC; - - Tensor() = default; - - template::type> - Tensor(std::size_t length, bool isCPU = true) - : detail::Tensor2D({{detail::ChannelToCount(), length}, {length, 1}}, isCPU) - { - } - - template::type> - Tensor(std::size_t length, std::size_t channelCount, bool isCPU = true) - : detail::Tensor2D({{channelCount, length}, {length, 1}}, isCPU) - { - } - - template::type> - Tensor(std::size_t length, DataType *dataPtr, bool isCPU = true) - : detail::Tensor2D({{detail::ChannelToCount(), length}, {length, 1}}, dataPtr, isCPU) - { - } - - template::type> - Tensor(std::size_t length, std::size_t channelCount, DataType *dataPtr, bool isCPU = true) - : detail::Tensor2D({{channelCount, length}, {length, 1}}, dataPtr, isCPU) - { - } -}; - -// 3D Tensors - -/** - * 3D HWC tensors. - * @tparam CC channel count. - * @tparam CT channel type. - */ -template -class Tensor : public detail::Tensor3D -{ -public: - using DataType = typename detail::ChannelTypeToNative::Type; - - static constexpr ChannelCount kChannelCount = CC; - - Tensor() = default; - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, B isCPU = true) - : detail::Tensor3D({{height, width * detail::ChannelToCount()}, - {width, detail::ChannelToCount()}, - {detail::ChannelToCount(), 1}}, - isCPU) - { - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t channelCount, B isCPU = true) - : detail::Tensor3D({{height, width * channelCount}, {width, channelCount}, {channelCount, 1}}, isCPU) - { - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, DataType *dataPtr, B isCPU = true) - : detail::Tensor3D({{height, width * detail::ChannelToCount()}, - {width, detail::ChannelToCount()}, - {detail::ChannelToCount(), 1}}, - dataPtr, isCPU) - { - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t rowPitch, DataType *dataPtr, B isCPU = true) - : detail::Tensor3D({{height, rowPitch / GetChannelSize(CT)}, - {width, detail::ChannelToCount()}, - {detail::ChannelToCount(), 1}}, - dataPtr, isCPU) - { - if (rowPitch % GetChannelSize(CT) != 0) - { - throw std::domain_error( - "cvcore::Tensor::Tensor ==> Parameter rowPitch is not evenly divisible by channel size"); - } - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t channelCount, DataType *dataPtr, B isCPU = true) - : detail::Tensor3D({{height, width * channelCount}, {width, channelCount}, {channelCount, 1}}, dataPtr, - isCPU) - { - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t channelCount, std::size_t rowPitch, DataType *dataPtr, - B isCPU = true) - : detail::Tensor3D({{height, rowPitch / GetChannelSize(CT)}, {width, channelCount}, {channelCount, 1}}, - dataPtr, isCPU) - { - if (rowPitch % GetChannelSize(CT) != 0) - { - throw std::domain_error( - "cvcore::Tensor::Tensor ==> Parameter rowPitch is not evenly divisible by channel size"); - } - } -}; - -/** - * 3D CHW tensors. - * @tparam CC channel count. - * @tparam CT channel type. 
- */ -template -class Tensor : public detail::Tensor3D -{ -public: - using DataType = typename detail::ChannelTypeToNative::Type; - - static constexpr ChannelCount kChannelCount = CC; - - Tensor() = default; - - template::type> - Tensor(std::size_t width, std::size_t height, bool isCPU = true) - : detail::Tensor3D({{detail::ChannelToCount(), width * height}, {height, width}, {width, 1}}, - isCPU) - { - } - - template::type> - Tensor(std::size_t width, std::size_t height, std::size_t channelCount, bool isCPU = true) - : detail::Tensor3D({{channelCount, width * height}, {height, width}, {width, 1}}, isCPU) - { - } - - template::type> - Tensor(std::size_t width, std::size_t height, DataType *dataPtr, bool isCPU = true) - : detail::Tensor3D({{detail::ChannelToCount(), width * height}, {height, width}, {width, 1}}, - dataPtr, isCPU) - { - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t rowPitch, DataType *dataPtr, B isCPU = true) - : detail::Tensor3D({{detail::ChannelToCount(), height * rowPitch / GetChannelSize(CT)}, - {height, rowPitch / GetChannelSize(CT)}, - {width, 1}}, - dataPtr, isCPU) - { - if (rowPitch % GetChannelSize(CT) != 0) - { - throw std::domain_error( - "cvcore::Tensor::Tensor ==> Parameter rowPitch is not evenly divisible by channel size"); - } - } - - template::type> - Tensor(std::size_t width, std::size_t height, std::size_t channelCount, DataType *dataPtr, bool isCPU = true) - : detail::Tensor3D({{channelCount, width * height}, {height, width}, {width, 1}}, dataPtr, isCPU) - { - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t channelCount, std::size_t rowPitch, DataType *dataPtr, - B isCPU = true) - : detail::Tensor3D({{channelCount, height * rowPitch / GetChannelSize(CT)}, - {height, rowPitch / GetChannelSize(CT)}, - {width, 1}}, - dataPtr, isCPU) - { - if (rowPitch % GetChannelSize(CT) != 0) - { - throw std::domain_error( - "cvcore::Tensor::Tensor ==> Parameter rowPitch is not evenly divisible by channel size"); - } - } -}; - -// 4D Tensors - -/** - * 4D DHWC tensors. - * @tparam CC channel count. - * @tparam CT channel type. 
- */ -template -class Tensor : public detail::Tensor4D -{ -public: - using DataType = typename detail::ChannelTypeToNative::Type; - - static constexpr ChannelCount kChannelCount = CC; - - Tensor() = default; - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t depth, B isCPU = true) - : detail::Tensor4D({{depth, height * width * detail::ChannelToCount()}, - {height, width * detail::ChannelToCount()}, - {width, detail::ChannelToCount()}, - {detail::ChannelToCount(), 1}}, - isCPU) - { - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t depth, std::size_t channelCount, B isCPU = true) - : detail::Tensor4D({{depth, height * width * channelCount}, - {height, width * channelCount}, - {width, channelCount}, - {channelCount, 1}}, - isCPU) - { - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t depth, DataType *dataPtr, B isCPU = true) - : detail::Tensor4D({{depth, height * width * detail::ChannelToCount()}, - {height, width * detail::ChannelToCount()}, - {width, detail::ChannelToCount()}, - {detail::ChannelToCount(), 1}}, - dataPtr, isCPU) - { - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t depth, std::size_t rowPitch, DataType *dataPtr, - B isCPU = true) - : detail::Tensor4D({{depth, height * rowPitch / GetChannelSize(CT)}, - {height, rowPitch / GetChannelSize(CT)}, - {width, detail::ChannelToCount()}, - {detail::ChannelToCount(), 1}}, - dataPtr, isCPU) - { - if (rowPitch % GetChannelSize(CT) != 0) - { - throw std::domain_error( - "cvcore::Tensor::Tensor ==> Parameter rowPitch is not evenly divisible by channel size"); - } - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t depth, std::size_t channelCount, DataType *dataPtr, - B isCPU = true) - : detail::Tensor4D({{depth, height * width * channelCount}, - {height, width * channelCount}, - {width, channelCount}, - {channelCount, 1}}, - dataPtr, isCPU) - { - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t depth, std::size_t channelCount, std::size_t rowPitch, - DataType *dataPtr, B isCPU = true) - : detail::Tensor4D({{depth, height * rowPitch / GetChannelSize(CT)}, - {height, rowPitch / GetChannelSize(CT)}, - {width, channelCount}, - {channelCount, 1}}, - dataPtr, isCPU) - { - if (rowPitch % GetChannelSize(CT) != 0) - { - throw std::domain_error( - "cvcore::Tensor::Tensor ==> Parameter rowPitch is not evenly divisible by channel size"); - } - } -}; - -/** - * 4D DCHW tensors. - * @tparam CC channel count. - * @tparam CT channel type. 
- */ -template -class Tensor : public detail::Tensor4D -{ -public: - using DataType = typename detail::ChannelTypeToNative::Type; - - static constexpr ChannelCount kChannelCount = CC; - - Tensor() = default; - - template::type> - Tensor(std::size_t width, std::size_t height, std::size_t depth, bool isCPU = true) - : detail::Tensor4D({{depth, detail::ChannelToCount() * width * height}, - {detail::ChannelToCount(), width * height}, - {height, width}, - {width, 1}}, - isCPU) - { - } - - template::type> - Tensor(std::size_t width, std::size_t height, std::size_t depth, std::size_t channelCount, bool isCPU = true) - : detail::Tensor4D( - {{depth, channelCount * width * height}, {channelCount, width * height}, {height, width}, {width, 1}}, - isCPU) - { - } - - template::type> - Tensor(std::size_t width, std::size_t height, std::size_t depth, DataType *dataPtr, bool isCPU = true) - : detail::Tensor4D({{depth, detail::ChannelToCount() * width * height}, - {detail::ChannelToCount(), width * height}, - {height, width}, - {width, 1}}, - dataPtr, isCPU) - { - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t depth, std::size_t rowPitch, DataType *dataPtr, - B isCPU = true) - : detail::Tensor4D({{depth, detail::ChannelToCount() * height * rowPitch / GetChannelSize(CT)}, - {detail::ChannelToCount(), height * rowPitch / GetChannelSize(CT)}, - {height, rowPitch / GetChannelSize(CT)}, - {width, 1}}, - dataPtr, isCPU) - { - if (rowPitch % GetChannelSize(CT) != 0) - { - throw std::domain_error( - "cvcore::Tensor::Tensor ==> Parameter rowPitch is not evenly divisible by channel size"); - } - } - - template::type> - Tensor(std::size_t width, std::size_t height, std::size_t depth, std::size_t channelCount, DataType *dataPtr, - bool isCPU = true) - : detail::Tensor4D( - {{depth, channelCount * width * height}, {channelCount, width * height}, {height, width}, {width, 1}}, - dataPtr, isCPU) - { - } - - template::value>::type * = nullptr> - Tensor(std::size_t width, std::size_t height, std::size_t depth, std::size_t channelCount, std::size_t rowPitch, - DataType *dataPtr, B isCPU = true) - : detail::Tensor4D({{depth, channelCount * height * rowPitch / GetChannelSize(CT)}, - {channelCount, height * rowPitch / GetChannelSize(CT)}, - {height, rowPitch / GetChannelSize(CT)}, - {width, 1}}, - dataPtr, isCPU) - { - if (rowPitch % GetChannelSize(CT) != 0) - { - throw std::domain_error( - "cvcore::Tensor::Tensor ==> Parameter rowPitch is not evenly divisible by channel size"); - } - } -}; - -/** - * 4D CDHW tensors. - * @tparam CC channel count. - * @tparam CT channel type. 
- */ -template -class Tensor : public detail::Tensor4D -{ -public: - using DataType = typename detail::ChannelTypeToNative::Type; - - static constexpr ChannelCount kChannelCount = CC; - - Tensor() = default; - - template::type> - Tensor(std::size_t width, std::size_t height, std::size_t depth, bool isCPU = true) - : detail::Tensor4D({{detail::ChannelToCount(), depth * width * height}, - {depth, width * height}, - {height, width}, - {width, 1}}, - isCPU) - { - } - - template::type> - Tensor(std::size_t width, std::size_t height, std::size_t depth, std::size_t channelCount, bool isCPU = true) - : detail::Tensor4D( - {{channelCount, depth * width * height}, {depth, width * height}, {height, width}, {width, 1}}, isCPU) - { - } - - template::type> - Tensor(std::size_t width, std::size_t height, std::size_t depth, DataType *dataPtr, bool isCPU = true) - : detail::Tensor4D({{detail::ChannelToCount(), depth * width * height}, - {depth, width * height}, - {height, width}, - {width, 1}}, - dataPtr, isCPU) - { - } - - template::type> - Tensor(std::size_t width, std::size_t height, std::size_t depth, std::size_t channelCount, DataType *dataPtr, - bool isCPU = true) - : detail::Tensor4D( - {{channelCount, depth * width * height}, {depth, width * height}, {height, width}, {width, 1}}, dataPtr, - isCPU) - { - } -}; - -} // namespace cvcore - -#endif // CVCORE_TENSOR_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/TensorList.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/TensorList.h deleted file mode 100644 index 4ec6695..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/TensorList.h +++ /dev/null @@ -1,37 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_TENSORLIST_H -#define CVCORE_TENSORLIST_H - -#include - -#include "Traits.h" -#include "Array.h" -#include "Tensor.h" - -namespace cvcore { - -/** - * @brief Implementation of a list of tensors of the same rank but, potentially, different dimensions - */ -template -using TensorList = typename std::enable_if::value, Array>::type; - -} // namespace cvcore - -#endif // CVCORE_TENSORLIST_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/TensorMap.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/TensorMap.h deleted file mode 100644 index 93c8783..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/TensorMap.h +++ /dev/null @@ -1,534 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_TENSORMAP_H -#define CVCORE_TENSORMAP_H - -#include -#include -#include -#include -#include - -#include "Traits.h" -#include "TensorList.h" - -namespace cvcore { - -/** - * @brief Implementation of a map of tensors of the same rank but, potentially, different dimensions, over the batch dimension - * - * @tparam TensorType Any CVCORE tensor type - * @tparam KeyType Any STL hashable data type - */ -template -class TensorMap {}; - -template -class TensorMap, KT, - typename std::enable_if>::value>::type> -{ - using my_type = TensorMap, KT>; - - public: - using key_type = KT; - using unit_type = Tensor; - using element_type = traits::remove_batch_t; - using frame_type = TensorList; - using buffer_type = TensorList; - - template - struct dim_data_type - { - std::size_t height; - std::size_t width; - }; - - template - struct dim_data_type::type> - { - std::size_t height; - std::size_t width; - std::size_t channels; - }; - - TensorMap() = default; - TensorMap(const my_type &) = delete; - TensorMap(my_type && other) - { - *this = std::move(other); - } - - /** - * @brief Construct a new Tensor Map object - * - * @param batchSize The batch dimension of all sub-tensors - * @param dimData The dimensional description of all sub-tensors in at least HWC format - * @param isCPU A boolean flag specifying what device to allocate the sub-tensors - */ - template::type> - TensorMap(std::size_t batchSize, - const std::vector> & dimData, - bool isCPU = true) - : m_maxBatchSize{batchSize}, m_size{dimData.size()}, - m_isCPU{isCPU}, m_buffer{dimData.size(), true} - { - m_buffer.setSize(m_size); - - int i = 0; - for(auto dim: dimData) - { - m_buffer[i] = std::move(unit_type(dim.width, - dim.height, - m_maxBatchSize, - m_isCPU)); - ++i; - } - - for(std::size_t i = 0; i < m_maxBatchSize; ++i) - { - m_pool.insert(i); - } - } - - /** - * @brief Construct a new Tensor Map object - * - * @param batchSize The batch dimension of all sub-tensors - * @param dimData The dimensional description of all sub-tensors in at least HWC format - * @param isCPU A boolean flag specifying what device to allocate the sub-tensors - */ - template::type> - TensorMap(std::size_t batchSize, - const std::vector> & dimData, - bool isCPU = true) - : m_maxBatchSize{batchSize}, m_size{dimData.size()}, - m_isCPU{isCPU}, m_buffer{dimData.size(), true} - { - m_buffer.setSize(m_size); - - int i = 0; - for(auto dim: dimData) - { - m_buffer[i] = std::move(unit_type(dim.width, - dim.height, - m_maxBatchSize, - dim.channels, - m_isCPU)); - ++i; - } - - for(std::size_t i = 0; i < m_maxBatchSize; ++i) - { - m_pool.insert(i); - } - } - - ~TensorMap() = default; - - my_type & operator=(const my_type &) = delete; - my_type & operator=(my_type && other) - { - std::swap(m_mapping, other.m_mapping); - std::swap(m_pool, other.m_pool); - std::swap(m_maxBatchSize, other.m_maxBatchSize); - std::swap(m_size, other.m_size); - std::swap(m_isCPU, other.m_isCPU); - std::swap(m_buffer, other.m_buffer); - - return *this; - } - - /** - * @brief A mapping of the batch dimension index to a given key - 
* - * @details Given a set of pairs such that the keys AND values are unique - * respectively, the key-wise mapping of the batch dimension is reset - * to the provided values. - * - * @param pairs An unordered map of the uniqe key value pairs - * @return true If the length of ``pairs`` is less than the max batch size - * and the key value pairs are one-to-one and onto. - * @return false If the conditions of ``true`` are not met. - */ - bool remap(const std::unordered_map & pairs) - { - bool result = false; - - if(pairs.size() <= m_maxBatchSize) - { - for(std::size_t i = 0; i < m_maxBatchSize; ++i) - { - m_pool.insert(i); - } - - m_mapping.clear(); - for(auto mapping: pairs) - { - if(m_pool.erase(mapping.second)) - { - m_mapping[mapping.first] = mapping.second; - } - } - - if((pairs.size() + m_pool.size()) == m_maxBatchSize) - { - result = true; - } - } - - return result; - } - - /** - * @brief Associates a given key with the first available batch index - * - * @details Assuming the associated keys has not reached `maxBatchSize`` - * then this function associates a given key with the first available - * batch index and returns that index value. If no batch index is - * available -1 is returned. NOTE: if ``key`` is already associated - * with a batch index, the that index is returned. - * - * @param key The key to be associated with a batch index value - * @return std::intmax_t The batch index associated with the key or -1 - * if no index is available. NOTE: because std::intmax_t is not a full - * covering of std::size_t, it is possible for wrap around to happen. - */ - std::intmax_t map(const key_type & key) - { - auto it = m_mapping.find(key); - - if(it == m_mapping.end() && !m_pool.empty()) - { - auto value = m_pool.begin(); - it = m_mapping.insert({key, *value}).first; - m_pool.erase(value); - } - - return static_cast(it != m_mapping.end() ? it->second : -1); - } - - /** - * @brief Dissociates a given key with a batch index if possible - * - * @details Assuming the given key is associated with a batch index this - * function removes the association and returns the batch index is was - * associated with. If no batch index is found associated with the given - * key, -1 is returned. - * - * @param key The key to be dissociated - * @return std::intmax_t The batch index associated with the key or -1 - * if not found. NOTE: because std::intmax_t is not a full covering of - * std::size_t, it is possible for wrap around to happen. - */ - std::intmax_t unmap(const key_type & key) - { - std::intmax_t result = -1; - - auto it = m_mapping.find(key); - - if(it != m_mapping.end()) - { - result = static_cast(it->second); - m_pool.insert(it->second); - m_mapping.erase(it); - } - - return result; - } - - /** - * @brief The number of keys associated with a batch index - * - * @return std::size_t - */ - std::size_t getKeyCount() const noexcept - { - return m_mapping.size(); - } - - /** - * @brief The maximum number of batch index - * - * @return std::size_t - */ - std::size_t getMaxBatchSize() const noexcept - { - return m_maxBatchSize; - } - - /** - * @brief The number of sub-tensors - * - * @return std::size_t - */ - std::size_t getUnitCount() const - { - return m_size; - } - - /** - * Get the size of given dimension. - * @param dimIdx dimension index. - * @return size of the specified dimension. - */ - std::size_t getTensorSize(std::size_t unitIdx, std::size_t dimIdx) const - { - return m_buffer[unitIdx].getSize(dimIdx); - } - - /** - * Get the stride of given dimension. 
- * @param dimIdx dimension index. - * @return stride of the specified dimension. - */ - std::size_t getTensorStride(std::size_t unitIdx, std::size_t dimIdx) const - { - return m_buffer[unitIdx].getStride(dimIdx); - } - - template::type> - unit_type getUnit(std::size_t idx) - { - unit_type result{m_buffer[idx].getWidth(), - m_buffer[idx].getHeight(), - m_buffer[idx].getDepth(), - m_buffer[idx].getData(), - m_buffer[idx].isCPU()}; - return result; - } - - template::type> - unit_type getUnit(std::size_t idx, ChannelCount UNUSED = T) - { - unit_type result{m_buffer[idx].getWidth(), - m_buffer[idx].getHeight(), - m_buffer[idx].getDepth(), - m_buffer[idx].getChannelCount(), - m_buffer[idx].getData(), - m_buffer[idx].isCPU()}; - return result; - } - - template::type> - frame_type getFrame(const key_type & idx) - { - frame_type result; - - if(m_mapping.find(idx) != m_mapping.end()) - { - std::size_t at = m_mapping[idx]; - result = std::move(frame_type{m_buffer.getSize(), m_buffer.isCPU()}); - result.setSize(m_size); - for(std::size_t i = 0; i < m_size; ++i) - { - element_type element{m_buffer[i].getWidth(), - m_buffer[i].getHeight(), - m_buffer[i].getData() + - at * m_buffer[i].getStride(TensorDimension::DEPTH), - m_buffer[i].isCPU()}; - result[i] = std::move(element); - } - } - - return result; - } - - template::type> - frame_type getFrame(const key_type & idx, ChannelCount UNUSED = T) - { - frame_type result; - - if(m_mapping.find(idx) != m_mapping.end()) - { - std::size_t at = m_mapping[idx]; - result = std::move(frame_type{m_buffer.getSize(), m_buffer.isCPU()}); - result.setSize(m_size); - for(std::size_t i = 0; i < m_size; ++i) - { - element_type element{m_buffer[i].getWidth(), - m_buffer[i].getHeight(), - m_buffer[i].getChannelCount(), - m_buffer[i].getData() + - at * m_buffer[i].getStride(TensorDimension::DEPTH), - m_buffer[i].isCPU()}; - result[i] = std::move(element); - } - } - - return result; - } - - template::type> - frame_type getFrame(key_type && idx) - { - frame_type result; - - if(m_mapping.find(idx) != m_mapping.end()) - { - std::size_t at = m_mapping[idx]; - result = std::move(frame_type{m_buffer.getSize(), m_buffer.isCPU()}); - result.setSize(m_size); - for(std::size_t i = 0; i < m_size; ++i) - { - element_type element{m_buffer[i].getWidth(), - m_buffer[i].getHeight(), - m_buffer[i].getData() + - at * m_buffer[i].getStride(TensorDimension::DEPTH), - m_buffer[i].isCPU()}; - result[i] = std::move(element); - } - } - - return result; - } - - template::type> - frame_type getFrame(key_type && idx, ChannelCount UNUSED = T) - { - frame_type result; - - if(m_mapping.find(idx) != m_mapping.end()) - { - std::size_t at = m_mapping[idx]; - result = std::move(frame_type{m_buffer.getSize(), m_buffer.isCPU()}); - result.setSize(m_size); - for(std::size_t i = 0; i < m_size; ++i) - { - element_type element{m_buffer[i].getWidth(), - m_buffer[i].getHeight(), - m_buffer[i].getChannelCount(), - m_buffer[i].getData() + - at * m_buffer[i].getStride(TensorDimension::DEPTH), - m_buffer[i].isCPU()}; - result[i] = std::move(element); - } - } - - return result; - } - - template::type> - element_type getElement(const key_type & keyIdx, std::size_t unitIdx) - { - element_type element; - - if(m_mapping.find(keyIdx) != m_mapping.end()) - { - std::size_t at = m_mapping[keyIdx]; - element = std::move(element_type{m_buffer[unitIdx].getWidth(), - m_buffer[unitIdx].getHeight(), - m_buffer[unitIdx].getData() + - at * m_buffer[unitIdx].getStride(TensorDimension::DEPTH), - m_buffer[unitIdx].isCPU()}); - } - - return 
element; - } - - template::type> - element_type getElement(const key_type & keyIdx, std::size_t unitIdx, ChannelCount UNUSED = T) - { - element_type element; - - if(m_mapping.find(keyIdx) != m_mapping.end()) - { - std::size_t at = m_mapping[keyIdx]; - element = std::move(element_type{m_buffer[unitIdx].getWidth(), - m_buffer[unitIdx].getHeight(), - m_buffer[unitIdx].getChannelCount(), - m_buffer[unitIdx].getData() + - at * m_buffer[unitIdx].getStride(TensorDimension::DEPTH), - m_buffer[unitIdx].isCPU()}); - } - - return element; - } - - template::type> - element_type getElement(key_type && keyIdx, std::size_t unitIdx) - { - element_type element; - - if(m_mapping.find(keyIdx) != m_mapping.end()) - { - std::size_t at = m_mapping[keyIdx]; - element = std::move(element_type{m_buffer[unitIdx].getWidth(), - m_buffer[unitIdx].getHeight(), - m_buffer[unitIdx].getData() + - at * m_buffer[unitIdx].getStride(TensorDimension::DEPTH), - m_buffer[unitIdx].isCPU()}); - } - - return element; - } - - template::type> - element_type getElement(key_type && keyIdx, std::size_t unitIdx, ChannelCount UNUSED = T) - { - element_type element; - - if(m_mapping.find(keyIdx) != m_mapping.end()) - { - std::size_t at = m_mapping[keyIdx]; - element = std::move(element_type{m_buffer[unitIdx].getWidth(), - m_buffer[unitIdx].getHeight(), - m_buffer[unitIdx].getChannelCount(), - m_buffer[unitIdx].getData() + - at * m_buffer[unitIdx].getStride(TensorDimension::DEPTH), - m_buffer[unitIdx].isCPU()}); - } - - return element; - } - - /** - * Get the ChannelType of the Tensor. - * @return ChannelType of the Tensor. - */ - constexpr ChannelType getType() const noexcept - { - return CT; - } - - /** - * Get the flag whether the Tensor is allocated in CPU or GPU. - * @return whether the Tensor is allocated in CPU. - */ - bool isCPU() const noexcept - { - return m_isCPU; - } - - private: - // Mapping and Pool form a unique-to-unique isometry between - // the keys and indices of the batch dimension - mutable std::unordered_map m_mapping; - mutable std::set m_pool; - - std::size_t m_maxBatchSize; - std::size_t m_size; - bool m_isCPU; - - buffer_type m_buffer; -}; - -} // namespace cvcore - -#endif // CVCORE_TENSORMAP_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Traits.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Traits.h deleted file mode 100644 index a78b780..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/core/Traits.h +++ /dev/null @@ -1,478 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
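The map()/unmap()/remap() members of the TensorMap removed above reduce to maintaining a bijection between user-supplied keys and slots of the batch dimension, backed by a pool of free slot indices. The following is a minimal self-contained sketch of that bookkeeping using plain standard-library containers; the names and types are placeholders, not the cvcore API.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <set>
#include <unordered_map>

// Sketch of TensorMap's key <-> batch-slot bookkeeping: pool_ holds free slot
// indices, mapping_ holds the current key -> slot association.
class SlotMap {
 public:
  explicit SlotMap(std::size_t maxBatchSize) {
    for (std::size_t i = 0; i < maxBatchSize; ++i) { pool_.insert(i); }
  }

  // Associate key with the first free slot (or return its existing slot); -1 if full.
  std::intmax_t map(int key) {
    auto it = mapping_.find(key);
    if (it == mapping_.end() && !pool_.empty()) {
      auto slot = pool_.begin();
      it = mapping_.insert({key, *slot}).first;
      pool_.erase(slot);
    }
    return it != mapping_.end() ? static_cast<std::intmax_t>(it->second) : -1;
  }

  // Release the slot held by key; returns the released slot or -1 if the key is unknown.
  std::intmax_t unmap(int key) {
    auto it = mapping_.find(key);
    if (it == mapping_.end()) { return -1; }
    const std::intmax_t slot = static_cast<std::intmax_t>(it->second);
    pool_.insert(it->second);
    mapping_.erase(it);
    return slot;
  }

 private:
  std::unordered_map<int, std::size_t> mapping_;
  std::set<std::size_t> pool_;
};

int main() {
  SlotMap slots(2);
  std::cout << slots.map(7) << "\n";    // 0: first free slot
  std::cout << slots.map(9) << "\n";    // 1
  std::cout << slots.map(11) << "\n";   // -1: no slot free
  std::cout << slots.unmap(7) << "\n";  // 0: slot 0 released
  std::cout << slots.map(11) << "\n";   // 0: reuses the released slot
}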
-// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_TRAITS_H -#define CVCORE_TRAITS_H - -#include - -#include "MathTypes.h" -#include "Tensor.h" - -namespace cvcore { namespace traits { - -// ----------------------------------------------------------------------------- -// Type Properties -// ----------------------------------------------------------------------------- - -template -struct is_tensor : std::false_type -{ -}; - -template -struct is_tensor> : std::true_type -{ -}; - -template -struct is_planar : std::false_type -{ - static_assert(is_tensor::value, ""); -}; - -template -struct is_planar> : std::integral_constant -{ -}; - -template -struct is_interleaved : std::false_type -{ - static_assert(is_tensor::value, ""); -}; - -template -struct is_interleaved> : std::integral_constant -{ -}; - -template -struct is_batch : std::false_type -{ - static_assert(is_tensor::value, ""); -}; - -template -struct is_batch> : std::integral_constant -{ -}; - -// ----------------------------------------------------------------------------- -// Type Modifications -// ----------------------------------------------------------------------------- - -template -struct to_planar -{ - static_assert(is_tensor::value, ""); - - using type = TensorType; -}; - -template -struct to_planar> -{ - using type = Tensor; -}; - -template -struct to_planar> -{ - using type = Tensor; -}; - -template -using to_planar_t = typename to_planar::type; - -template -struct to_interleaved -{ - static_assert(is_tensor::value, ""); - - using type = TensorType; -}; - -template -struct to_interleaved> -{ - using type = Tensor; -}; - -template -struct to_interleaved> -{ - using type = Tensor; -}; - -template -struct to_interleaved> -{ - using type = Tensor; -}; - -template -using to_interleaved_t = typename to_interleaved::type; - -template -struct add_batch -{ - static_assert(is_tensor::value, ""); - - using type = TensorType; -}; - -template -struct add_batch> -{ - using type = Tensor; -}; - -template -struct add_batch> -{ - using type = Tensor; -}; - -template -using add_batch_t = typename add_batch::type; - -template -struct remove_batch -{ - static_assert(is_tensor::value, ""); - - using type = TensorType; -}; - -template -struct remove_batch> -{ - using type = Tensor; -}; - -template -struct remove_batch> -{ - using type = Tensor; -}; - -template -struct remove_batch> -{ - using type = Tensor; -}; - -template -using remove_batch_t = typename remove_batch::type; - -template -struct to_c1 -{ - static_assert(is_tensor::value, ""); -}; - -template -struct to_c1> -{ - using type = Tensor; -}; - -template -using to_c1_t = typename to_c1::type; - -template -struct to_c2 -{ - static_assert(is_tensor::value, ""); -}; - -template -struct to_c2> -{ - using type = Tensor; -}; - -template -using to_c2_t = typename to_c2::type; - -template -struct to_c3 -{ - static_assert(is_tensor::value, ""); -}; - -template -struct to_c3> -{ - using type = Tensor; -}; - -template -using to_c3_t = typename to_c3::type; - -template -struct to_c4 -{ - static_assert(is_tensor::value, ""); -}; - -template -struct to_c4> -{ - using type = Tensor; -}; - -template -using to_c4_t = typename to_c4::type; - -template -struct to_cx -{ - static_assert(is_tensor::value, ""); -}; - -template -struct to_cx> -{ - using type = Tensor; -}; - -template -using to_cx_t = typename to_cx::type; - -template -struct to_u8 -{ - static_assert(is_tensor::value, ""); -}; - -template -struct to_u8> -{ - using type = Tensor; -}; - -template -using to_u8_t = typename to_u8::type; 
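Every conversion trait in the header above follows the same idiom: a primary template that rejects non-tensor types through a static_assert, and partial specializations that rewrite exactly one template argument of Tensor while preserving the others. A stripped-down sketch of the idiom, using placeholder tags rather than the cvcore layout/channel/type enums:

#include <type_traits>

// Placeholder tags standing in for cvcore's layout and element-type enums.
enum class Layout { HWC, CHW };
enum class Depth { U8, F32 };

template <Layout L, Depth D>
struct Tensor {};

template <typename T>
struct is_tensor : std::false_type {};

template <Layout L, Depth D>
struct is_tensor<Tensor<L, D>> : std::true_type {};

// Primary template: only Tensor instantiations are accepted.
template <typename T>
struct to_f32 {
  static_assert(is_tensor<T>::value, "to_f32 requires a Tensor type");
};

// Partial specialization: swap the element type, keep the layout.
template <Layout L, Depth D>
struct to_f32<Tensor<L, D>> {
  using type = Tensor<L, Depth::F32>;
};

template <typename T>
using to_f32_t = typename to_f32<T>::type;

static_assert(std::is_same<to_f32_t<Tensor<Layout::HWC, Depth::U8>>,
                           Tensor<Layout::HWC, Depth::F32>>::value,
              "layout preserved, element type rewritten");

int main() { return 0; }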
- -template -struct to_u16 -{ - static_assert(is_tensor::value, ""); -}; - -template -struct to_u16> -{ - using type = Tensor; -}; - -template -using to_u16_t = typename to_u16::type; - -template -struct to_f16 -{ - static_assert(is_tensor::value, ""); -}; - -template -struct to_f16> -{ - using type = Tensor; -}; - -template -using to_f16_t = typename to_f16::type; - -template -struct to_f32 -{ - static_assert(is_tensor::value, ""); -}; - -template -struct to_f32> -{ - using type = Tensor; -}; - -template -using to_f32_t = typename to_f32::type; - -template -using to_c1u8 = to_c1>; - -template -using to_c1u16 = to_c1>; - -template -using to_c1f16 = to_c1>; - -template -using to_c1f32 = to_c1>; - -template -using to_c2u8 = to_c2>; - -template -using to_c2u16 = to_c2>; - -template -using to_c2f16 = to_c2>; - -template -using to_c2f32 = to_c2>; - -template -using to_c3u8 = to_c3>; - -template -using to_c3u16 = to_c3>; - -template -using to_c3f16 = to_c3>; - -template -using to_c3f32 = to_c3>; - -template -using to_c4u8 = to_c4>; - -template -using to_c4u16 = to_c4>; - -template -using to_c4f16 = to_c4>; - -template -using to_c4f32 = to_c4>; - -template -using to_cxu8 = to_cx>; - -template -using to_cxu16 = to_cx>; - -template -using to_cxf16 = to_cx>; - -template -using to_cxf32 = to_cx>; - -template -using to_c1u8_t = to_c1_t>; - -template -using to_c1u16_t = to_c1_t>; - -template -using to_c1f16_t = to_c1_t>; - -template -using to_c1f32_t = to_c1_t>; - -template -using to_c2u8_t = to_c2_t>; - -template -using to_c2u16_t = to_c2_t>; - -template -using to_c2f16_t = to_c2_t>; - -template -using to_c2f32_t = to_c2_t>; - -template -using to_c3u8_t = to_c3_t>; - -template -using to_c3u16_t = to_c3_t>; - -template -using to_c3f16_t = to_c3_t>; - -template -using to_c3f32_t = to_c3_t>; - -template -using to_c4u8_t = to_c4_t>; - -template -using to_c4u16_t = to_c4_t>; - -template -using to_c4f16_t = to_c4_t>; - -template -using to_c4f32_t = to_c4_t>; - -template -using to_cxu8_t = to_cx_t>; - -template -using to_cxu16_t = to_cx_t>; - -template -using to_cxf16_t = to_cx_t>; - -template -using to_cxf32_t = to_cx_t>; - -template -struct get_dim; - -template<> -struct get_dim -{ - static constexpr int value = 1; -}; - -template<> -struct get_dim -{ - static constexpr int value = 1; -}; - -template<> -struct get_dim -{ - static constexpr int value = 2; -}; - -template<> -struct get_dim -{ - static constexpr int value = 2; -}; - -template<> -struct get_dim -{ - static constexpr int value = 3; -}; - -template<> -struct get_dim -{ - static constexpr int value = 3; -}; -}} // namespace cvcore::traits - -#endif // CVCORE_TRAITS_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/BBoxUtils.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/BBoxUtils.h deleted file mode 100644 index 624139b..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/BBoxUtils.h +++ /dev/null @@ -1,135 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_BBOX_UTILS_H -#define CVCORE_BBOX_UTILS_H - -#include "cv/core/BBox.h" - -namespace cvcore { namespace tensor_ops { - -/** - * An enum. - * Enum type for Bounding Box interpolation - */ -enum BBoxInterpolationType -{ - CONST_INTERPOLATION, /**< Uses constant value interpolation */ - IMAGE_INTERPOLATION, /**< Interpolates based on image size */ -}; - -/** - * An enum. - * Enum type for Bounding Box scaling operation. - */ -enum BBoxScaleType -{ - NORMAL, /**< Scale box without fixing center point. */ - CENTER /**< Scale box with center fixed. */ -}; - -/** - * Function to calculate the intersection of two bounding boxes. - * @param a one of the BBox. - * @param b the other BBox. - * @return intersection area of the two bounding boxes. - */ -float GetIntersection(const BBox &a, const BBox &b); - -/** - * Function to calculate the union of two bounding boxes. - * @param a one of the BBox. - * @param b the other BBox. - * @return union area of the two bounding boxes. - */ -float GetUnion(const BBox &a, const BBox &b); - -/** - * Function to calculate the IoU (Intersection over Union) of two bounding boxes. - * @param a one of the BBox. - * @param b the other BBox. - * @return IoU of the two bounding boxes. - */ -float GetIoU(const BBox &a, const BBox &b); - -/** - * Function to merge two BBox together. - * @param a one of the BBox. - * @param b the other BBox. - * @return Merged bounding box. - */ -BBox MergeBoxes(const BBox &a, const BBox &b); - -/** - * Clamp BBox. - * @param a BBox to be clamped. - * @param b boundary BBox. - * @return clamped BBox. - */ -BBox ClampBox(const BBox &a, const BBox &b); - -/** - * Interpolate bounding boxes. - * @param currLeft left x coordinate. - * @param currRight right x coordinate. - * @param currBottom bottom y coordinate. - * @param currTop top y coordiante. - * @param xScaler scale ratio along width direction. - * @param yScaler scale ratio along height direction. - * @param currColumn current column index. - * @param currRow current row index. - * @param type bbox interpolation type. - * @param bboxNorm bounding box scaled factor. - * @return interpolated BBox. - */ -BBox InterpolateBoxes(float currLeft, float currRight, float currBottom, float currTop, float xScaler, float yScaler, - int currColumn, int currRow, BBoxInterpolationType type = IMAGE_INTERPOLATION, - float bboxNorm = 1.0); - -/** - * Scale BBox. - * @param bbox input BBox. - * @param xScaler scale ratio along width direction. - * @param yScaler scale ratio along height direction. - * @param type BBox scaling type. - * @return scaled BBox. - */ -BBox ScaleBox(const BBox &bbox, float xScaler, float yScaler, BBoxScaleType type = NORMAL); - -/** - * Transform BBox. - * @param bbox input BBox. - * @param xScaler scale ratio along width direction. - * @param yScaler scale ratio along height direction. - * @param xOffset offset along width direction in pixels. - * @param yOffset offset along height direction in pixels. - * @return transformed BBox. - */ -BBox TransformBox(const BBox &bbox, float xScaler, float yScaler, float xOffset, float yOffset); - -/** - * Squarify BBox. - * @param box input BBox. - * @param boundary boundary BBox used for clamping. - * @param scale scaling factor. - * @return squarified BBox. 
- */ -BBox SquarifyBox(const BBox &box, const BBox &boundary, float scale); - -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_BBOX_UTILS_H \ No newline at end of file diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/DBScan.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/DBScan.h deleted file mode 100644 index 24b4dc4..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/DBScan.h +++ /dev/null @@ -1,91 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_DBSCAN_H -#define CVCORE_DBSCAN_H - -#include "cv/core/Array.h" -#include "cv/core/BBox.h" - -namespace cvcore { namespace tensor_ops { - -/** - * An enum. - * Enum type for BBox merge type. - */ -enum BBoxMergeType -{ - MAXIMUM, /**< merge by expanding bounding boxes */ - WEIGHTED, /**< weighted by confidence scores. */ -}; - -/** - * DBScan implementation used for post-processing of object detection. - */ -class DBScan -{ -public: - /** - * DBScan constructor. - * @param pointsize size of the input array. - * @param minPoints minimum number of neighbors within the radius. - * @param epsilon radius of neighborhood around a point. - */ - DBScan(int pointsSize, int minPoints, float epsilon); - - /** - * Run DBScan cluster and return the cluster indices. - * @param input input unclustered BBoxes array. - * @param clusters output array containing cluster ids. - */ - void doCluster(Array &input, Array &clusters); - - /** - * Run DBScan cluster and return the cluster bboxes. - * @param input input unclustered BBoxes array. - * @param output output clustered BBoxes array. - * @param type bbox merge type - */ - void doClusterAndMerge(Array &input, Array &output, BBoxMergeType type = MAXIMUM); - - /** - * Run DBScan cluster and return the cluster bboxes weighted on input weights. - * @param input input unclustered BBoxes array. - * @param weights weights needed for merging clusterd bboxes. - * @param output output clustered BBoxes array. - * @param type bbox merge type - */ - void doClusterAndMerge(Array &input, Array &weights, Array &output, - BBoxMergeType type = WEIGHTED); - - /** - * Get the number of clusters. - * @return number of clusters. - */ - int getNumClusters() const; - -private: - int m_pointsSize; - int m_numClusters; - int m_minPoints; - float m_epsilon; - Array m_clusterStates; -}; - -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_DBSCAN_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/Errors.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/Errors.h deleted file mode 100644 index beebce4..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/Errors.h +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
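GetIntersection, GetUnion, and GetIoU above are the standard overlap arithmetic for axis-aligned boxes: IoU = intersection area / union area. A small sketch of that math; the {xmin, ymin, xmax, ymax} box layout is an assumption made for the example, not the cvcore BBox definition.

#include <algorithm>
#include <cassert>
#include <cmath>

// Assumed box layout for this sketch: corners in pixels, xmax > xmin, ymax > ymin.
struct Box { float xmin, ymin, xmax, ymax; };

float Intersection(const Box& a, const Box& b) {
  const float w = std::max(0.0f, std::min(a.xmax, b.xmax) - std::max(a.xmin, b.xmin));
  const float h = std::max(0.0f, std::min(a.ymax, b.ymax) - std::max(a.ymin, b.ymin));
  return w * h;
}

float Union(const Box& a, const Box& b) {
  const float areaA = (a.xmax - a.xmin) * (a.ymax - a.ymin);
  const float areaB = (b.xmax - b.xmin) * (b.ymax - b.ymin);
  return areaA + areaB - Intersection(a, b);
}

float IoU(const Box& a, const Box& b) { return Intersection(a, b) / Union(a, b); }

int main() {
  const Box a{0.0f, 0.0f, 4.0f, 4.0f};  // area 16
  const Box b{2.0f, 2.0f, 6.0f, 6.0f};  // area 16, overlapping a in a 2x2 patch
  assert(std::fabs(IoU(a, b) - 4.0f / 28.0f) < 1e-6f);
  return 0;
}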
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_ERRORS_H -#define CVCORE_ERRORS_H - -#include "cv/core/CVError.h" - -namespace cvcore { namespace tensor_ops { - -// TODO: Add doxygen style comments for each of the error codes -enum class TensorOpsErrorCode : std::int32_t -{ - SUCCESS = 0, - COMPUTE_ENGINE_UNSUPPORTED_BY_CONTEXT, - CAMERA_DISTORTION_MODEL_UNSUPPORTED -}; - -}} // namespace cvcore::tensor_ops - -// WARNING: Extending base C++ namespace to cover cvcore error codes -namespace std { - -template <> -struct is_error_code_enum : true_type {}; - -} // namespace std - -namespace cvcore { namespace tensor_ops { - -std::error_code make_error_code(TensorOpsErrorCode) noexcept; - -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_ERRORS_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/IImageWarp.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/IImageWarp.h deleted file mode 100644 index 4a81e1a..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/IImageWarp.h +++ /dev/null @@ -1,63 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
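The error-code header above follows the standard recipe for plugging a custom enum into std::error_code: specialize std::is_error_code_enum for the enum and provide a make_error_code overload that binds it to an error_category. A self-contained sketch of that wiring; the enum, category name, and message strings are illustrative stand-ins, not the cvcore implementation.

#include <cstdint>
#include <iostream>
#include <string>
#include <system_error>

// Stand-in enum; the real header defines TensorOpsErrorCode.
enum class DemoErrorCode : std::int32_t { SUCCESS = 0, UNSUPPORTED_ENGINE };

// Opt the enum into implicit conversion to std::error_code.
namespace std {
template <>
struct is_error_code_enum<DemoErrorCode> : true_type {};
}  // namespace std

namespace {
struct DemoCategory : std::error_category {
  const char* name() const noexcept override { return "demo-tensor-ops"; }
  std::string message(int value) const override {
    switch (static_cast<DemoErrorCode>(value)) {
      case DemoErrorCode::SUCCESS: return "success";
      case DemoErrorCode::UNSUPPORTED_ENGINE: return "compute engine unsupported";
    }
    return "unknown";
  }
};
const DemoCategory demoCategory{};
}  // namespace

std::error_code make_error_code(DemoErrorCode e) noexcept {
  return {static_cast<int>(e), demoCategory};
}

int main() {
  const std::error_code ec = DemoErrorCode::UNSUPPORTED_ENGINE;  // uses make_error_code via ADL
  std::cout << ec.category().name() << ": " << ec.message() << "\n";
  return ec ? 1 : 0;
}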
-// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_IIMAGEWARP_H -#define CVCORE_IIMAGEWARP_H - -#include -#include -#include - -namespace cvcore { namespace tensor_ops { - -struct ImageGrid -{ - static constexpr std::size_t MAX_HORIZ_REGIONS = 4; - static constexpr std::size_t MAX_VERT_REGIONS = 4; - static constexpr std::size_t MIN_REGION_WIDTH = 64; - static constexpr std::size_t MIN_REGION_HIGHT = 16; - - std::int8_t numHorizRegions{0}; - std::int8_t numVertRegions{0}; - std::array horizInterval; - std::array vertInterval; - std::array regionHeight; - std::array regionWidth; -}; - -class IImageWarp -{ - public: - // Public Destructor - virtual ~IImageWarp() = 0; - - protected: - // Protected Constructor(s) - IImageWarp() = default; - IImageWarp(const IImageWarp &) = default; - IImageWarp(IImageWarp &&) noexcept = default; - - // Protected Operator(s) - IImageWarp &operator=(const IImageWarp &) = default; - IImageWarp &operator=(IImageWarp &&) noexcept = default; -}; - -using ImageWarp = IImageWarp*; - -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_IIMAGEWARP_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/ITensorOperatorContext.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/ITensorOperatorContext.h deleted file mode 100644 index 5aa6cdb..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/ITensorOperatorContext.h +++ /dev/null @@ -1,65 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_ITENSOROPERATORCONTEXT_H -#define CVCORE_ITENSOROPERATORCONTEXT_H - -#include - -#include "ITensorOperatorStream.h" -#include "cv/core/CVError.h" -#include "cv/core/ComputeEngine.h" - -namespace cvcore { namespace tensor_ops { - -enum class TensorBackend : std::uint8_t -{ - NPP, - VPI, - DALI -}; - -class ITensorOperatorContext -{ -public: - // Public Constructor(s)/Destructor - virtual ~ITensorOperatorContext() noexcept = default; - - // Public Accessor Method(s) - virtual std::error_code CreateStream(TensorOperatorStream &, const ComputeEngine &) = 0; - virtual std::error_code DestroyStream(TensorOperatorStream &) = 0; - - virtual bool IsComputeEngineCompatible(const ComputeEngine &) const noexcept = 0; - - virtual TensorBackend Backend() const noexcept = 0; - -protected: - // Protected Constructor(s) - ITensorOperatorContext() = default; - ITensorOperatorContext(const ITensorOperatorContext &) = default; - ITensorOperatorContext(ITensorOperatorContext &&) noexcept = default; - - // Protected Operator(s) - ITensorOperatorContext &operator=(const ITensorOperatorContext &) = default; - ITensorOperatorContext &operator=(ITensorOperatorContext &&) noexcept = default; -}; - -using TensorOperatorContext = ITensorOperatorContext *; - -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_ITENSOROPERATORCONTEXT_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/ITensorOperatorStream.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/ITensorOperatorStream.h deleted file mode 100644 index c4de9df..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/ITensorOperatorStream.h +++ /dev/null @@ -1,251 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
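The context interface above acts as an abstract factory: a backend-specific context creates and destroys opaque stream handles, and reports which backend (NPP, VPI, DALI) it drives. A simplified self-contained sketch of that shape; the class and method names are placeholders, not the cvcore signatures.

#include <iostream>
#include <memory>
#include <system_error>
#include <vector>

// Simplified stand-ins for the stream/context pair sketched above.
struct Stream {
  virtual ~Stream() = default;
};
using StreamHandle = Stream*;

enum class Backend { NPP, VPI, DALI };

struct IContext {
  virtual ~IContext() = default;
  virtual std::error_code CreateStream(StreamHandle& out) = 0;
  virtual std::error_code DestroyStream(StreamHandle& stream) = 0;
  virtual Backend GetBackend() const noexcept = 0;
};

// Illustrative implementation; a real context would own NPP/VPI/DALI state.
class DemoContext : public IContext {
 public:
  std::error_code CreateStream(StreamHandle& out) override {
    streams_.emplace_back(new Stream());
    out = streams_.back().get();
    return {};
  }

  std::error_code DestroyStream(StreamHandle& stream) override {
    for (auto it = streams_.begin(); it != streams_.end(); ++it) {
      if (it->get() == stream) {
        streams_.erase(it);
        stream = nullptr;
        return {};
      }
    }
    return std::make_error_code(std::errc::invalid_argument);
  }

  Backend GetBackend() const noexcept override { return Backend::NPP; }

 private:
  std::vector<std::unique_ptr<Stream>> streams_;
};

int main() {
  DemoContext context;
  StreamHandle stream = nullptr;
  if (!context.CreateStream(stream)) {
    std::cout << "created stream on backend " << static_cast<int>(context.GetBackend()) << "\n";
  }
  context.DestroyStream(stream);
}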
-// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_ITENSOROPERATORSTREAM_H -#define CVCORE_ITENSOROPERATORSTREAM_H - -#include -#include -#include -#include - -#include "cv/core/CameraModel.h" -#include "cv/core/ComputeEngine.h" -#include "cv/core/Image.h" - -#include "Errors.h" -#include "IImageWarp.h" -#include "ImageUtils.h" -namespace cvcore { namespace tensor_ops { - -class NotImplementedException : public std::logic_error -{ -public: - NotImplementedException() - : std::logic_error("Method not yet implemented.") - { - } -}; - -class ITensorOperatorStream -{ -public: - // Public Constructor(s)/Destructor - virtual ~ITensorOperatorStream() noexcept = default; - - // Public Accessor Method(s) - virtual std::error_code Status() noexcept = 0; - - virtual std::error_code GenerateWarpFromCameraModel(ImageWarp & warp, - const ImageGrid & grid, - const CameraModel & source, - const CameraIntrinsics & target) = 0; - - virtual std::error_code DestroyWarp(ImageWarp & warp) noexcept = 0; - - // Public Mutator Method(s) - virtual std::error_code Remap(Image &outputImage, const Image &inputImage, - const ImageWarp warp, - InterpolationType interpolation = INTERP_LINEAR, - BorderType border = BORDER_ZERO) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - virtual std::error_code Remap(Image &outputImage, const Image &inputImage, - const ImageWarp warp, - InterpolationType interpolation = INTERP_LINEAR, - BorderType border = BORDER_ZERO) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code Remap(Image &outputImage, const Image &inputImage, - const ImageWarp warp, - InterpolationType interpolation = INTERP_LINEAR, - BorderType border = BORDER_ZERO) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code Remap(Image &outputImage, const Image &inputImage, - const ImageWarp warp, - InterpolationType interpolation = INTERP_LINEAR, - BorderType border = BORDER_ZERO) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation = INTERP_LINEAR, BorderType border = BORDER_ZERO) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - virtual std::error_code Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation = INTERP_LINEAR, BorderType border = BORDER_ZERO) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - virtual std::error_code Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation = INTERP_LINEAR, BorderType border = BORDER_ZERO) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - virtual std::error_code Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation = INTERP_LINEAR, BorderType border = BORDER_ZERO) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - virtual std::error_code Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation = INTERP_LINEAR, BorderType border = BORDER_ZERO) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code Normalize(Image &outputImage, const Image 
&inputImage - /* only configuration parameters */) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - virtual std::error_code Normalize(Image &outputImage, const Image &inputImage - /* only configuration parameters */) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - virtual std::error_code Normalize(Image &outputImage, const Image &inputImage - /* only configuration parameters */) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - virtual std::error_code Normalize(Image &outputImage, const Image &inputImage - /* only configuration parameters */) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code StereoDisparityEstimator(Image &outputImage, const Image &inputLeftImage, - const Image &inputRightImage, size_t windowSize, - size_t maxDisparity) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code StereoDisparityEstimator(Image &outputImage, const Image &inputLeftImage, - const Image &inputRightImage, size_t windowSize, - size_t maxDisparity) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - - virtual std::error_code StereoDisparityEstimator(Image &outputImage, const Image &inputLeftImage, - const Image &inputRightImage, size_t windowSize, - size_t maxDisparity) - { - throw NotImplementedException(); - return make_error_code(ErrorCode::NOT_IMPLEMENTED); - } - -protected: - // Protected Constructor(s) - ITensorOperatorStream() = default; - 
ITensorOperatorStream(const ITensorOperatorStream &) = default; - ITensorOperatorStream(ITensorOperatorStream &&) noexcept = default; - - // Protected Operator(s) - ITensorOperatorStream &operator=(const ITensorOperatorStream &) = default; - ITensorOperatorStream &operator=(ITensorOperatorStream &&) noexcept = default; -}; - -using TensorOperatorStream = ITensorOperatorStream *; - -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_ITENSOROPERATORSTREAM_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/ImageUtils.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/ImageUtils.h deleted file mode 100644 index e46cd42..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/ImageUtils.h +++ /dev/null @@ -1,1091 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_IMAGE_UTILS_H -#define CVCORE_IMAGE_UTILS_H - -#include - -#include - -#include "cv/core/BBox.h" -#include "cv/core/Tensor.h" - -namespace cvcore { namespace tensor_ops { - -/** - * An enum. - * Enum type for color conversion type. - */ -enum ColorConversionType -{ - BGR2RGB, /**< convert BGR to RGB. */ - RGB2BGR, /**< convert RGB to BGR. */ - BGR2GRAY, /**< convert BGR to Grayscale. */ - RGB2GRAY, /**< convert RGB to Grayscale. */ - GRAY2BGR, /**< convert Grayscale to BGR. */ - GRAY2RGB, /**< convert Grayscale to RGB. */ -}; - -/** - * An enum. - * Enum type for resize interpolation type. - */ -enum InterpolationType -{ - INTERP_NEAREST, /**< nearest interpolation. */ - INTERP_LINEAR, /**< linear interpolation. */ - INTERP_CUBIC_BSPLINE, /**< cubic bspline interpolation. */ - INTERP_CUBIC_CATMULLROM /**< cubic catmullrom interpolation. */ -}; - -/** - * An enum. - * Enum type for resize interpolation type. - */ -enum BorderType -{ - BORDER_ZERO, - BORDER_REPEAT, - BORDER_REVERSE, - BORDER_MIRROR -}; - -// please note the following functions all require GPU Tensors - -/** - * Image resizing for one channel HWC format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image batch resizing for one channel NHWC uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of interest for one channel HWC format uint_8 type Tensor. 
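ITensorOperatorStream above gives every operation a default body that throws NotImplementedException, so a backend overrides only the overloads it actually supports and anything else fails loudly at runtime rather than silently. A minimal sketch of that pattern, with illustrative names:

#include <iostream>
#include <stdexcept>

// Base interface with throwing defaults: concrete streams override only what they support.
struct NotImplementedException : std::logic_error {
  NotImplementedException() : std::logic_error("Method not yet implemented.") {}
};

struct IOperatorStream {
  virtual ~IOperatorStream() = default;
  virtual void Resize() { throw NotImplementedException(); }
  virtual void Remap() { throw NotImplementedException(); }
};

// A backend that implements Resize but not Remap.
struct ResizeOnlyStream : IOperatorStream {
  void Resize() override { std::cout << "resized\n"; }
};

int main() {
  ResizeOnlyStream stream;
  stream.Resize();   // supported by this backend
  try {
    stream.Remap();  // falls through to the throwing default
  } catch (const NotImplementedException& e) {
    std::cout << e.what() << "\n";
  }
}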
- * @param dst destination tensor. - * @param src source tensor. - * @param dstROI destination crop region. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of interest for one channel HWC format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing for one channel HWC format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image batch resizing for one channel HWC format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of interest for one channel HWC format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param dstROI destination crop region. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of interest for one channel HWC format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing for one channel HWC format FP32 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image batch resizing for one channel HWC format FP32 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of interest for one channel HWC format FP32 type Tensor. - * @param dst destination tensor. 
- * @param src source tensor. - * @param dstROI destination crop region. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of interest for one channel HWC format FP32 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing for one channel CHW format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of interest for one channel CHW format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param dstROI destination crop region. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of interest for one channel CHW format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing for one channel CHW format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of interest for one channel CHW format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param dstROI destination crop region. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of interest for one channel CHW format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing for one channel CHW format FP32 type Tensor. - * @param dst destination tensor. 
- * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of interest for one channel CHW format FP32 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param dstROI destination crop region. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of interest for one channel CHW format FP32 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing for three channels interleaved uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image batch resizing for three channels interleaved uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of intesrest for three channel HWC format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param dstROI destination crop region. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of intesrest for three channel HWC format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing for three channels interleaved uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image batch resizing for three channels interleaved uint_16 type Tensor. - * @param dst destination tensor. 
- * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of intesrest for three channel HWC format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param dstROI destination crop region. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of intesrest for three channel HWC format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing for three channels interleaved float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image batch resizing for three channels interleaved float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type interpolation type. - * @param keep_aspect_ratio whether to keep aspect ratio. - * @param stream specified cuda stream. - */ -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio = false, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of intesrest for three channel HWC format float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param dstROI destination crop region. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Image resizing of a region of intesrest for three channel HWC format float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Crop a region of interest for one channel HWC format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param stream specified cuda stream. - */ -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream = 0); - -/** - * Crop a region of interest for one channel HWC format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param stream specified cuda stream. 
- */ -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream = 0); - -/** - * Crop a region of interest for one channel HWC format float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param stream specified cuda stream. - */ -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream = 0); - -/** - * Crop a region of interest for one channel CHW format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param stream specified cuda stream. - */ -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream = 0); - -/** - * Crop a region of interest for one channel CHW format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param stream specified cuda stream. - */ -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream = 0); - -/** - * Crop a region of interest for one channel CHW format float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param stream specified cuda stream. - */ -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream = 0); - -/** - * Crop a region of interest for three channels HWC format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param stream specified cuda stream. - */ -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream = 0); - -/** - * Crop a region of interest for three channels HWC format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param stream specified cuda stream. - */ -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream = 0); - -/** - * Crop a region of interest for three channels HWC format float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param srcROI source crop region. - * @param stream specified cuda stream. - */ -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream = 0); - -/** - * Apply a perspective transformation to one channel HWC format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param coeffs 3x3 transformation matrix. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void WarpPerspective(Tensor &dst, const Tensor &src, const double coeffs[3][3], - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Apply a perspective transformation to one channel HWC format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param coeffs 3x3 transformation matrix. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void WarpPerspective(Tensor &dst, const Tensor &src, const double coeffs[3][3], - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Apply a perspective transformation to one channel HWC format float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param coeffs 3x3 transformation matrix. - * @param type interpolation type. - * @param stream specified cuda stream. 
- */ -void WarpPerspective(Tensor &dst, const Tensor &src, const double coeffs[3][3], - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Apply a perspective transformation to three channels HWC format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param coeffs 3x3 transformation matrix. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void WarpPerspective(Tensor &dst, const Tensor &src, const double coeffs[3][3], - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Apply a perspective transformation to three channels HWC format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param coeffs 3x3 transformation matrix. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void WarpPerspective(Tensor &dst, const Tensor &src, const double coeffs[3][3], - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** - * Apply a perspective transformation to three channels HWC format float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param coeffs 3x3 transformation matrix. - * @param type interpolation type. - * @param stream specified cuda stream. - */ -void WarpPerspective(Tensor &dst, const Tensor &src, const double coeffs[3][3], - InterpolationType type = INTERP_LINEAR, cudaStream_t stream = 0); - -/** Color conversion between two three channels interleaved uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type color conversion type. - * @param stream specified cuda stream. - */ -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream = 0); - -/** Batch color conversion between three channels interleaved uint_8 type Tensors. - * @param dst destination tensor. - * @param src source tensor. - * @param type color conversion type. - * @param stream specified cuda stream. - */ -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream = 0); - -/** Color conversion between two three channels interleaved uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type color conversion type. - * @param stream specified cuda stream. - */ -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream = 0); - -/** Batch color conversion between three channels interleaved uint_16 type Tensors. - * @param dst destination tensor. - * @param src source tensor. - * @param type color conversion type. - * @param stream specified cuda stream. - */ -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream = 0); - -/** Color conversion between two three channels interleaved float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type color conversion type. - * @param stream specified cuda stream. - */ -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream = 0); - -/** Batch color conversion between three channels interleaved float type Tensors. - * @param dst destination tensor. - * @param src source tensor. - * @param type color conversion type. - * @param stream specified cuda stream. 
- */ -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream = 0); - -/** Color conversion from three channels interleaved uint_8 type Tensor to one channel Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type color conversion type. - * @param stream specified cuda stream. - */ -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream = 0); - -/** Color conversion from three channels interleaved uint_16 type Tensor to one channel Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type color conversion type. - * @param stream specified cuda stream. - */ -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream = 0); - -/** Color conversion from three channels interleaved float type Tensor to one channel Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type color conversion type. - * @param stream specified cuda stream. - */ -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream = 0); - -/** Color conversion from one channel interleaved uint_8 type Tensor to three channels Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type color conversion type. - * @param stream specified cuda stream. - */ -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream = 0); - -/** Color conversion from one channel interleaved uint_16 type Tensor to three channels Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type color conversion type. - * @param stream specified cuda stream. - */ -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream = 0); - -/** Color conversion from one channel interleaved float type Tensor to three channels Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param type color conversion type. - * @param stream specified cuda stream. - */ -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream = 0); - -/** Convert bit depth from F32 to U8 of a single channel channel Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param scale multiply the pixel values by a factor. - * @param stream specified cuda stream. - */ -void ConvertBitDepth(Tensor &dst, Tensor &src, const float scale, cudaStream_t stream = 0); - -/** Convert bit depth from F32 to U8 of a N * single channel Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param scale multiply the pixel values by a factor. - * @param stream specified cuda stream. - */ -void ConvertBitDepth(Tensor &dst, Tensor &src, const float scale, cudaStream_t stream = 0); - -/** - * Normalization for three channels interleaved uint8_t type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale[3], const float offset[3], - cudaStream_t stream = 0); - -/** - * Batch normalization for three channels interleaved uint8_t type Tensor. 
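A small numeric illustration for the ConvertBitDepth overloads above: with scale = 255.0f, a float image holding values in [0, 1] maps onto the full uint8 range. The F32 -> U8 Tensor instantiations are assumptions, since the template arguments are not visible in this patch text.

// Hypothetical sketch; instantiations assumed from the doc comments.
void BitDepthSketch(cvcore::Tensor<cvcore::HWC, cvcore::C1, cvcore::U8> &dst,
                    cvcore::Tensor<cvcore::HWC, cvcore::C1, cvcore::F32> &src,
                    cudaStream_t stream)
{
    // 0.0f -> 0, 0.5f -> ~128, 1.0f -> 255
    cvcore::tensor_ops::ConvertBitDepth(dst, src, 255.0f, stream);
}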
- * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale[3], const float offset[3], - cudaStream_t stream = 0); - -/** - * Normalization for three channels interleaved uint16_t type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale[3], const float offset[3], - cudaStream_t stream = 0); - -/** - * Batch normalization for three channels interleaved uint16_t type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale[3], - const float offset[3], cudaStream_t stream = 0); - -/** - * Normalization for three channels interleaved float type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale[3], const float offset[3], - cudaStream_t stream = 0); - -/** - * Batch normalization for three channels interleaved float type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale[3], - const float offset[3], cudaStream_t stream = 0); - -/** - * Normalization for one channel interleaved uint8_t type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream = 0); - -/** - * Batch normalization for one channel interleaved uint8_t type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream = 0); - -/** - * Normalization for one channel interleaved uint16_t type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. 
- */ -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream = 0); - -/** - * Batch normalization for one channel interleaved uint16_t type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream = 0); - -/** - * Normalization for one channel interleaved float type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream = 0); - -/** - * Batch normalization for one channel interleaved float type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream = 0); - -/** - * Normalization for one channel planar uint8_t type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream = 0); - -/** - * Batch normalization for one channel planar uint8_t type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream = 0); - -/** - * Normalization for one channel planar uint16_t type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream = 0); - -/** - * Batch normalization for one channel planar uint16_t type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream = 0); - -/** - * Normalization for one channel planar float type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. 
- * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream = 0); - -/** - * Batch normalization for one channel planar float type Tensor. - * Each element x will be transformed to (x + offset) * scale - * @param dst destination tensor. - * @param src source tensor. - * @param scale scaling factor for normalization. - * @param offset offset constant for normalization. - * @stream specified cuda stream. - */ -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream = 0); - -/** - * Convert interleaved image to planar image for three channels uint8_t type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param stream specified cuda stream. - */ -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream = 0); - -/** - * Batch convert interleaved image to planar image for three channels uint8_t type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param stream specified cuda stream. - */ -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream = 0); - -/** - * Convert interleaved image to planar image for three channels uint16_t type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param stream specified cuda stream. - */ -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream = 0); - -/** - * Batch Convert interleaved image to planar image for three channels uint16_t type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param stream specified cuda stream. - */ -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream = 0); - -/** - * Convert interleaved image to planar image for three channels float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param stream specified cuda stream. - */ -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream = 0); - -/** - * Batch convert interleaved image to planar image for three channels float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param stream specified cuda stream. - */ -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream = 0); - -/** - * Convert interleaved image to planar image for single channel uint8_t type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param stream specified cuda stream. - */ -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream = 0); - -/** - * Batch convert interleaved image to planar image for single channel uint8_t type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param stream specified cuda stream. - */ -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream = 0); - -/** - * Convert interleaved image to planar image for single channel uint16_t type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param stream specified cuda stream. - */ -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream = 0); - -/** - * Batch Convert interleaved image to planar image for single channel uint16_t type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param stream specified cuda stream. 
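A standalone numeric illustration of the (x + offset) * scale transform documented for the Normalize overloads above, mapping uint8 values in [0, 255] to roughly [-1, 1]; this is just the per-element formula, not the cvcore kernel.

// offset = -127.5f, scale = 1 / 127.5f:
//   x = 0   -> (0   - 127.5) / 127.5 = -1.0
//   x = 255 -> (255 - 127.5) / 127.5 = +1.0
inline float NormalizePixel(float x, float scale, float offset)
{
    return (x + offset) * scale;
}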
- */ -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream = 0); - -/** - * Convert interleaved image to planar image for single channel float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param stream specified cuda stream. - */ -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream = 0); - -/** - * Batch convert interleaved image to planar image for single channel float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param stream specified cuda stream. - */ -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream = 0); - -/** - * Combines various functions to imitate OpenCV blobFromImage() for various type tensor input. - * @tparam TL_IN input tensor layout (HWC/NHWC). - * @tparam TL_OUT output tensor layout(CHW/NCHW). - * @tparam CC channel count. - * @tparam CT channel type for input tensor (output is fixed: F32). - */ -template -class ImageToNormalizedPlanarTensorOperator -{ -public: - /** - * Implementation for ImageToNormalizedPlanarTensorOperator. - */ - struct ImageToNormalizedPlanarTensorOperatorImpl; - - /** - * Constructor for HWC -> CHW tensors. - */ - template::type * = nullptr> - ImageToNormalizedPlanarTensorOperator(int width, int height); - - /** - * Constructor for NHWC -> NCHW tensors. - */ - template::type * = nullptr> - ImageToNormalizedPlanarTensorOperator(int width, int height, int depth); - - /** - * Destructor for ImageToNormalizedPlanarTensorOperator. - */ - ~ImageToNormalizedPlanarTensorOperator(); - - /** - * Run the composite operations on three channels tensors. - * @param dst destination tensor. - * @param src source tensor. - * @param scale scale factor for normalization. - * @param offset offset constant for normalization. - * @param swapRB whether to swap the first and last channels. - * @param keep_aspect_ratio whether to keep aspect ratio when resizing. - * @param stream specified cuda stream. - */ - template::type * = nullptr> - void operator()(Tensor &dst, const Tensor &src, const float scale[3], - const float offset[3], bool swapRB, bool keep_aspect_ratio = false, cudaStream_t stream = 0); - - /** - * Run the composite operations on single channel tensors. - * @param dst destination tensor. - * @param src source tensor. - * @param scale scale factor for normalization. - * @param offset offset constant for normalization. - * @param keep_aspect_ratio whether to keep aspect ratio when resizing. - * @param stream specified cuda stream. - */ - template::type * = nullptr> - void operator()(Tensor &dst, const Tensor &src, float scale, float offset, - bool keep_aspect_ratio = false, cudaStream_t stream = 0); - -private: - std::unique_ptr m_pImpl; -}; - -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_IMAGE_UTILS_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/OneEuroFilter.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/OneEuroFilter.h deleted file mode 100644 index 4947042..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/OneEuroFilter.h +++ /dev/null @@ -1,82 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
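A standalone CPU reference for the composite HWC uint8 -> CHW float conversion with normalization that ImageToNormalizedPlanarTensorOperator above performs (resize and aspect-ratio handling omitted). This is an independent sketch, not the cvcore implementation.

#include <cstdint>
#include <vector>

std::vector<float> HWCToNormalizedCHW(const std::uint8_t *src, int width, int height,
                                      const float scale[3], const float offset[3],
                                      bool swapRB)
{
    std::vector<float> dst(3 * width * height);
    for (int y = 0; y < height; ++y)
    {
        for (int x = 0; x < width; ++x)
        {
            for (int c = 0; c < 3; ++c)
            {
                // Optionally swap the first and last channels (e.g. BGR -> RGB).
                const int srcC = swapRB ? 2 - c : c;
                const float v = static_cast<float>(src[(y * width + x) * 3 + srcC]);
                // Planar CHW layout with per-channel normalization.
                dst[c * width * height + y * width + x] = (v + offset[c]) * scale[c];
            }
        }
    }
    return dst;
}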
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_ONEEUROFILTER_H -#define CVCORE_ONEEUROFILTER_H - -#include -#include - -#include "cv/core/CVError.h" - -namespace cvcore { namespace tensor_ops { - -/** - * Euro Filter Parameters - */ -struct OneEuroFilterParams -{ - float dataUpdateRate; /**< Data Update rate in Hz. */ - float minCutoffFreq; /**< Minimum Cut off frequency in Hz. */ - float cutoffSlope; /**< Beta or Speed coefficient which is a tuning parameter. */ - float derivCutoffFreq; /**< Cutoff frequency for derivative. */ -}; - -/* -The one euro filter is a low pass filter for filtering noisy signals in real-time. The filtering uses exponential smoothing where the smooting factor is computed dynamically using the input data update rate. The smoothing factor provides a trade off between slow speed jitter vs high speed lag. -There are two main tuning parameters for the filter, the speed coeffcient Beta and the minimum cut off frequency. -If high speed lag is a problem, increase beta; if slow speed jitter is a problem, decrease fcmin. -Reference : http://cristal.univ-lille.fr/~casiez/1euro/ -*/ -template -class OneEuroFilter -{ -public: - struct OneEuroFilterImpl; - /** - * Euro Filter Constructor. - * @param filterParams Filter parameters - */ - OneEuroFilter(const OneEuroFilterParams &filterParams); - - /** - * Reset Euro filter Parameters. - * @param filterParams Filter parameters - * @return Error code - */ - std::error_code resetParams(const OneEuroFilterParams &filterParams); - - /** - * Filter the input. Supports float, double, vector2f, Vector3f, Vector3d, Vector3f input types. - * @param inValue Noisy input to be filtered. - * @param filteredValue Filtered output - * @return Error code - */ - std::error_code execute(T &filteredValue, T inValue); - - ~OneEuroFilter(); - -private: - /** - * Implementation of EuroFilter. - */ - std::unique_ptr m_pImpl; -}; - -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_ONEEUROFILTER_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/TensorOperators.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/TensorOperators.h deleted file mode 100644 index e32a3df..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/tensor_ops/TensorOperators.h +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
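A standalone sketch of the recurrence described in the OneEuroFilter comment above (after the referenced 1-euro filter paper): the derivative is smoothed first, its magnitude widens the cutoff, and the adaptive cutoff sets the smoothing factor. This is an independent illustration for a scalar signal, not the cvcore OneEuroFilterImpl.

#include <cmath>

struct OneEuroFilterSketch
{
    float rate;       // data update rate in Hz
    float minCutoff;  // minimum cutoff frequency in Hz
    float beta;       // speed coefficient
    float dCutoff;    // cutoff frequency for the derivative
    float prevX = 0.0f;
    float prevDx = 0.0f;
    bool hasPrev = false;

    static float Alpha(float cutoff, float rate)
    {
        // Exponential smoothing factor derived from the cutoff and update rate.
        const float tau = 1.0f / (2.0f * 3.14159265f * cutoff);
        return 1.0f / (1.0f + tau * rate);
    }

    float Filter(float x)
    {
        if (!hasPrev)
        {
            hasPrev = true;
            prevX = x;
            return x;
        }
        const float dx = (x - prevX) * rate;                        // raw derivative
        const float dxHat = prevDx + Alpha(dCutoff, rate) * (dx - prevDx);
        const float cutoff = minCutoff + beta * std::fabs(dxHat);   // adaptive cutoff
        const float xHat = prevX + Alpha(cutoff, rate) * (x - prevX);
        prevX = xHat;
        prevDx = dxHat;
        return xHat;
    }
};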
-// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_TENSOROPERATORS_H -#define CVCORE_TENSOROPERATORS_H - -#include -#include -#include - -#include "cv/tensor_ops/ITensorOperatorContext.h" -#include "cv/tensor_ops/ITensorOperatorStream.h" - -namespace cvcore { namespace tensor_ops { - -class TensorContextFactory -{ -public: - static std::error_code CreateContext(TensorOperatorContext &, TensorBackend backend); - static std::error_code DestroyContext(TensorOperatorContext &context); - - static bool IsBackendSupported(TensorBackend backend); - -private: - using MultitonType = std::unordered_map>>; - - static MultitonType instances; - static std::mutex instanceMutex; -}; - -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_TENSOROPERATORS_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/trtbackend/TRTBackend.h b/isaac_ros_ess/gxf/ess/cvcore/include/cv/trtbackend/TRTBackend.h deleted file mode 100644 index 52b2e40..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/trtbackend/TRTBackend.h +++ /dev/null @@ -1,203 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_TRTBACKEND_H -#define CVCORE_TRTBACKEND_H - -#include -#include -#include -#include -#include - -#include - -namespace cvcore { - -/** - * Enum to specify different precision types. - */ -enum TRTBackendPrecision -{ - INT8, /**< 8-bit integer precision. */ - FP16, /**< 16-bit float precision. */ - FP32 /**< 32-bit float precision. */ -}; - -/** - * Enum to specify TensorRT blob layout type. - */ -enum TRTBackendBlobLayout -{ - PLANAR, /**< planar input type. */ - INTERLEAVED /**< interleaved input type. */ -}; - -/** - * Struct to store TensorRT blob dimensions. - */ -struct TRTBackendBlobSize -{ - int channels; /**< channels count. */ - int height; /**< blob height. */ - int width; /**< blob width. */ -}; - -/** - * Enum to specify model type. - */ -enum ModelType -{ - ONNX, /**< ONNX model. */ - UFF, /**< Uff model. */ - TRT_ENGINE, /**< serialized TensorRT engine. */ - TRT_ENGINE_IN_MEMORY /**< a memory pointer to deserialized TensorRT ICudaEngine. */ -}; - -/** - * Parameters for TRTBackend. - */ -struct TRTBackendParams -{ - // This constructor is for backward compatibility with all the other core modules which use trtbackend. 
- TRTBackendParams(std::vector inputLayers, std::vector inputDims, - std::vector outputLayers, std::string weightsPath, std::string enginePath, - TRTBackendPrecision precision, int batchSize, ModelType modelType, bool explicitBatch, - void *engine = nullptr, TRTBackendBlobLayout layout = PLANAR) - : inputLayers(inputLayers) - , inputDims(inputDims) - , outputLayers(outputLayers) - , weightsPath(weightsPath) - , enginePath(enginePath) - , precision(precision) - , batchSize(batchSize) - , modelType(modelType) - , explicitBatch(explicitBatch) - , trtEngineInMemory(engine) - , inputLayout(layout) - { - } - - std::vector inputLayers; /**< names of input layers. */ - std::vector inputDims; /**< dimensions of input layers. */ - std::vector outputLayers; /**< names of output layers. */ - std::string weightsPath; /**< model weight path. */ - std::string enginePath; /**< TensorRT engine path. */ - TRTBackendPrecision precision; /**< TensorRT engine precision. */ - int batchSize; /**< inference batch size. */ - ModelType modelType; /**< model type. */ - bool explicitBatch; /**< whether it is explicit batch dimension. */ - void * - trtEngineInMemory; /**< pointer to hold deserialized TensorRT ICudaEngine, for ModelType::TRT_ENGINE_IN_MEMORY. */ - TRTBackendBlobLayout inputLayout; /**< input blob layout. */ -}; - -// Forward declaration -struct TRTImpl; - -/** - * TensorRT wrapper class. - */ -class TRTBackend -{ -public: - /** - * Constructor of TRTBackend. - * @param modelFilePath path of the network model. - * @param precision TensorRT precision type. - */ - TRTBackend(const char *modelFilePath, TRTBackendPrecision precision, int batchSize = 1, bool explicitBatch = false); - - /** - * Constructor of TRTBackend. - * @param inputParams parameters of TRTBackend. - */ - TRTBackend(TRTBackendParams &inputParams); - - /** - * Destructor of TRTBackend. - */ - ~TRTBackend(); - - /** - * Run inference. - * @param buffer input GPU buffers. - */ - [[deprecated]] void infer(void **buffer); - - /** - * Run inference. - * @param buffer input GPU buffers. - * @param batchSize run infer with specific batch size, passed in setBindingDimension() call. - * @param stream update cuda stream in this instance. - */ - void infer(void **buffer, int batchSize, cudaStream_t stream); - - /** - * Get the cuda stream for TRTBackend. - * @return return cuda stream ID. - */ - [[deprecated]] cudaStream_t getCUDAStream() const; - - /** - * Set the cuda stream for TRTBackend. - * @param stream specified cudaStream_t for the TensorRT Engine. - */ - [[deprecated]] void setCUDAStream(cudaStream_t stream); - - /** - * Get all input/output bindings count. - * @return number of all bindings. - */ - int getBlobCount() const; - - /** - * Get the blob dimension for given blob index. - * @param blobIndex blob index. - * @return blob dimension for the given index. - */ - TRTBackendBlobSize getTRTBackendBlobSize(int blobIndex) const; - - /** - * Get the total number of elements for the given blob index. - * @param blobIndex blob index. - * @return total size for the given index. - */ - int getBlobLinearSize(int blobIndex) const; - - /** - * Get the blob index for the given blob name. - * @param blobName blob name. - * @return blob index for the given name. - */ - int getBlobIndex(const char *blobName) const; - - /** - * Check if binding is input. - * @param index binding index. - * @return whether the binding is input. 
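A usage sketch for the TRTBackend wrapper declared in this header, using only the methods listed above. Buffer sizing assumes FP32 bindings, and error handling is omitted.

#include <cuda_runtime.h>
#include <vector>

void InferSketch(cvcore::TRTBackend &backend, cudaStream_t stream)
{
    // One device buffer per binding, sized from the reported element counts
    // (sizeof(float) assumes FP32 blobs, which is an assumption here).
    std::vector<void *> buffers(backend.getBlobCount(), nullptr);
    for (int i = 0; i < backend.getBlobCount(); ++i)
    {
        cudaMalloc(&buffers[i], backend.getBlobLinearSize(i) * sizeof(float));
    }
    // ... fill the bindings where backend.bindingIsInput(i) is true ...
    backend.infer(buffers.data(), /*batchSize=*/1, stream);
    for (void *b : buffers)
    {
        cudaFree(b);
    }
}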
- */ - bool bindingIsInput(const int index) const; - -private: - // TRT related variables - std::unique_ptr m_pImpl; -}; - -} // namespace cvcore - -#endif // CVCORE_TRTBACKEND_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/Array.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/Array.cpp deleted file mode 100644 index 05cd535..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/Array.cpp +++ /dev/null @@ -1,145 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "cv/core/Array.h" - -#include - -#include -#include - -namespace cvcore { - -ArrayBase::ArrayBase() - : m_data{nullptr} - , m_size{0} - , m_capacity{0} - , m_elemSize{0} - , m_isOwning{false} - , m_isCPU{true} -{ -} - -ArrayBase::ArrayBase(std::size_t capacity, std::size_t elemSize, void *dataPtr, bool isCPU) - : ArrayBase() -{ - m_isOwning = false; - m_isCPU = isCPU; - m_elemSize = elemSize; - m_capacity = capacity; - m_data = dataPtr; -} - -ArrayBase::ArrayBase(std::size_t capacity, std::size_t elemSize, bool isCPU) - : ArrayBase(capacity, elemSize, nullptr, isCPU) -{ - m_isOwning = true; - - // allocate - const size_t arraySize = capacity * elemSize; - if (arraySize > 0) - { - if (isCPU) - { - m_data = std::malloc(arraySize); - } - else - { - if (cudaMalloc(&m_data, arraySize) != 0) - { - throw std::runtime_error("CUDA alloc() failed!"); - } - } - } -} - -ArrayBase::~ArrayBase() -{ - if (m_isOwning) - { - if (m_isCPU) - { - std::free(m_data); - } - else - { - cudaFree(m_data); - } - } -} - -ArrayBase::ArrayBase(ArrayBase &&t) - : ArrayBase() -{ - *this = std::move(t); -} - -ArrayBase &ArrayBase::operator=(ArrayBase &&t) -{ - using std::swap; - - swap(m_data, t.m_data); - swap(m_size, t.m_size); - swap(m_capacity, t.m_capacity); - swap(m_elemSize, t.m_elemSize); - swap(m_isOwning, t.m_isOwning); - swap(m_isCPU, t.m_isCPU); - - return *this; -} - -void *ArrayBase::getElement(int idx) const -{ - assert(idx >= 0 && idx < m_capacity); - return reinterpret_cast(m_data) + idx * m_elemSize; -} - -std::size_t ArrayBase::getSize() const -{ - return m_size; -} -std::size_t ArrayBase::getCapacity() const -{ - return m_capacity; -} -std::size_t ArrayBase::getElementSize() const -{ - return m_elemSize; -} - -void ArrayBase::setSize(std::size_t size) -{ - assert(size <= m_capacity); - m_size = size; -} - -bool ArrayBase::isCPU() const -{ - return m_isCPU; -} - -bool ArrayBase::isOwning() const -{ - return m_isOwning; -} - -void *ArrayBase::getData() const -{ - return m_data; -} - -} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/Dummy.cu b/isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/Dummy.cu deleted file mode 100644 index e69de29..0000000 diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/MathTypes.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/MathTypes.cpp deleted file 
mode 100644 index e0b4adc..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/MathTypes.cpp +++ /dev/null @@ -1,244 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "cv/core/MathTypes.h" - -namespace cvcore { - -namespace { - -AxisAngleRotation RotationMatrixToAxisAngleUtil(const std::vector &rotMatrix) -{ - assert(rotMatrix.size() == 9); - AxisAngleRotation axisangle; - double row0 = 0.5 * (rotMatrix[7] - rotMatrix[5]); - double row1 = 0.5 * (rotMatrix[2] - rotMatrix[6]); - double row2 = 0.5 * (rotMatrix[3] - rotMatrix[1]); - - double sin_angle = std::sqrt(row0 * row0 + row1 * row1 + row2 * row2); - double cos_angle = 0.5 * (rotMatrix[0] + rotMatrix[4] + rotMatrix[8] - 1.0); - sin_angle = sin_angle > 1.0 ? 1.0 : sin_angle; - cos_angle = cos_angle > 1.0 ? 1.0 : (cos_angle < -1.0 ? -1.0 : cos_angle); - - if (sin_angle == 0.0) - { - axisangle.angle = 0; - axisangle.axis.x = 0; - axisangle.axis.y = 0; - axisangle.axis.z = 0; - } - else - { - axisangle.angle = std::atan2(sin_angle, cos_angle); - axisangle.axis.x = row0 / sin_angle; - axisangle.axis.y = row1 / sin_angle; - axisangle.axis.z = row2 / sin_angle; - } - return axisangle; -} -} // namespace - -Vector3d RotationMatrixToRotationVector(const std::vector &rotMatrix) -{ - AxisAngleRotation axisangle = RotationMatrixToAxisAngleUtil(rotMatrix); - Vector3d rotVector; - rotVector.x = axisangle.angle * axisangle.axis.x; - rotVector.y = axisangle.angle * axisangle.axis.y; - rotVector.z = axisangle.angle * axisangle.axis.z; - return rotVector; -} - -AxisAngleRotation RotationMatrixToAxisAngleRotation(const std::vector &rotMatrix) -{ - AxisAngleRotation axisangle = RotationMatrixToAxisAngleUtil(rotMatrix); - return axisangle; -} - -std::vector AxisAngleToRotationMatrix(const AxisAngleRotation &axisangle) -{ - std::vector rotMatrix; - rotMatrix.resize(9); - double cosangle = std::cos(axisangle.angle); - double sinagle = std::sin(axisangle.angle); - double temp = 1.0 - cosangle; - - rotMatrix[0] = cosangle + axisangle.axis.x * axisangle.axis.x * temp; - rotMatrix[4] = cosangle + axisangle.axis.y * axisangle.axis.y * temp; - rotMatrix[8] = cosangle + axisangle.axis.z * axisangle.axis.z * temp; - - double value1 = axisangle.axis.x * axisangle.axis.y * temp; - double value2 = axisangle.axis.z * sinagle; - rotMatrix[3] = value1 + value2; - rotMatrix[1] = value1 - value2; - value1 = axisangle.axis.x * axisangle.axis.z * temp; - value2 = axisangle.axis.y * sinagle; - rotMatrix[6] = value1 - value2; - rotMatrix[2] = value1 + value2; - value1 = axisangle.axis.y * axisangle.axis.z * temp; - value2 = axisangle.axis.x * sinagle; - rotMatrix[7] = value1 + value2; - rotMatrix[5] = value1 - value2; - return rotMatrix; -} - -Vector3d AxisAngleRotationToRotationVector(const AxisAngleRotation &axisangle) -{ - double angle = axisangle.angle; - 
Vector3d rotVector; - rotVector.x = angle * axisangle.axis.x; - rotVector.y = angle * axisangle.axis.y; - rotVector.z = angle * axisangle.axis.z; - return rotVector; -} - -AxisAngleRotation RotationVectorToAxisAngleRotation(const Vector3d &rotVector) -{ - double normVector = rotVector.x * rotVector.x + rotVector.y * rotVector.y + rotVector.z * rotVector.z; - normVector = std::sqrt(normVector); - AxisAngleRotation axisangle; - if (normVector == 0) - { - axisangle.angle = 0; - axisangle.axis.x = 0; - axisangle.axis.y = 0; - axisangle.axis.z = 0; - } - else - { - axisangle.angle = normVector; - axisangle.axis.x = rotVector.x / normVector; - axisangle.axis.y = rotVector.y / normVector; - axisangle.axis.z = rotVector.z / normVector; - } - return axisangle; -} - -Quaternion AxisAngleRotationToQuaternion(const AxisAngleRotation &axisangle) -{ - Quaternion qrotation; - qrotation.qx = axisangle.axis.x * sin(axisangle.angle / 2); - qrotation.qy = axisangle.axis.y * sin(axisangle.angle / 2); - qrotation.qz = axisangle.axis.z * sin(axisangle.angle / 2); - qrotation.qw = std::cos(axisangle.angle / 2); - return qrotation; -} - -AxisAngleRotation QuaternionToAxisAngleRotation(const Quaternion &qrotation) -{ - Quaternion qtemp(qrotation.qx, qrotation.qy, qrotation.qz, qrotation.qw); - if (qrotation.qw > 1) - { - double qnorm = qrotation.qx * qrotation.qx + qrotation.qy * qrotation.qy + qrotation.qz * qrotation.qz + - qrotation.qw * qrotation.qw; - qnorm = std::sqrt(qnorm); - qtemp.qx = qrotation.qx / qnorm; - qtemp.qy = qrotation.qy / qnorm; - qtemp.qz = qrotation.qz / qnorm; - qtemp.qw = qrotation.qw / qnorm; - } - AxisAngleRotation axisangle; - axisangle.angle = 2 * std::acos(qtemp.qw); - double normaxis = std::sqrt(1 - qtemp.qw * qtemp.qw); - if (normaxis < 0.001) - { - axisangle.axis.x = qtemp.qx; - axisangle.axis.y = qtemp.qy; - axisangle.axis.z = qtemp.qz; - } - else - { - axisangle.axis.x = qtemp.qx / normaxis; - axisangle.axis.y = qtemp.qy / normaxis; - axisangle.axis.z = qtemp.qz / normaxis; - } - return axisangle; -} - -std::vector QuaternionToRotationMatrix(const Quaternion &qrotation) -{ - std::vector rotMatrix; - rotMatrix.resize(9); - double qxsquare = qrotation.qx * qrotation.qx; - double qysquare = qrotation.qy * qrotation.qy; - double qzsquare = qrotation.qz * qrotation.qz; - double qwsquare = qrotation.qw * qrotation.qw; - - // Ensure quaternion is normalized - double invsersenorm = 1 / (qxsquare + qysquare + qzsquare + qwsquare); - rotMatrix[0] = (qxsquare - qysquare - qzsquare + qwsquare) * invsersenorm; - rotMatrix[4] = (-qxsquare + qysquare - qzsquare + qwsquare) * invsersenorm; - rotMatrix[8] = (-qxsquare - qysquare + qzsquare + qwsquare) * invsersenorm; - - double value1 = qrotation.qx * qrotation.qy; - double value2 = qrotation.qz * qrotation.qw; - rotMatrix[3] = 2.0 * (value1 + value2) * invsersenorm; - rotMatrix[1] = 2.0 * (value1 - value2) * invsersenorm; - - value1 = qrotation.qx * qrotation.qz; - value2 = qrotation.qy * qrotation.qw; - rotMatrix[6] = 2.0 * (value1 - value2) * invsersenorm; - rotMatrix[2] = 2.0 * (value1 + value2) * invsersenorm; - value1 = qrotation.qz * qrotation.qy; - value2 = qrotation.qx * qrotation.qw; - rotMatrix[7] = 2.0 * (value1 + value2) * invsersenorm; - rotMatrix[5] = 2.0 * (value1 - value2) * invsersenorm; - return rotMatrix; -} - -Quaternion RotationMatrixToQuaternion(const std::vector &rotMatrix) -{ - Quaternion qrotation; - double diagsum = rotMatrix[0] + rotMatrix[4] + rotMatrix[8]; - if (diagsum > 0) - { - double temp = 1 / (2 * 
std::sqrt(diagsum + 1.0)); - qrotation.qw = 0.25 / temp; - qrotation.qx = (rotMatrix[7] - rotMatrix[5]) * temp; - qrotation.qy = (rotMatrix[2] - rotMatrix[6]) * temp; - qrotation.qz = (rotMatrix[3] - rotMatrix[1]) * temp; - } - else - { - if (rotMatrix[0] > rotMatrix[4] && rotMatrix[0] > rotMatrix[8]) - { - double temp = 2 * std::sqrt(rotMatrix[0] - rotMatrix[4] - rotMatrix[8] + 1.0); - qrotation.qx = 0.25 * temp; - qrotation.qw = (rotMatrix[7] - rotMatrix[5]) * temp; - qrotation.qy = (rotMatrix[2] - rotMatrix[6]) * temp; - qrotation.qz = (rotMatrix[3] - rotMatrix[1]) * temp; - } - else if (rotMatrix[0] > rotMatrix[8]) - { - double temp = 2 * std::sqrt(rotMatrix[4] - rotMatrix[0] - rotMatrix[8] + 1.0); - qrotation.qy = 0.25 * temp; - qrotation.qx = (rotMatrix[7] - rotMatrix[5]) * temp; - qrotation.qw = (rotMatrix[2] - rotMatrix[6]) * temp; - qrotation.qz = (rotMatrix[3] - rotMatrix[1]) * temp; - } - else - { - double temp = 2 * std::sqrt(rotMatrix[8] - rotMatrix[4] - rotMatrix[0] + 1.0); - qrotation.qz = 0.25 * temp; - qrotation.qx = (rotMatrix[7] - rotMatrix[5]) * temp; - qrotation.qy = (rotMatrix[2] - rotMatrix[6]) * temp; - qrotation.qw = (rotMatrix[3] - rotMatrix[1]) * temp; - } - } - return qrotation; -} - -} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/Tensor.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/Tensor.cpp deleted file mode 100644 index 83ede60..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/core/cvcore/Tensor.cpp +++ /dev/null @@ -1,270 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
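A worked example for the conversion helpers above: a 90 degree rotation about +Z. The field accesses mirror the implementation above; default construction of AxisAngleRotation is assumed.

#include <cmath>
#include "cv/core/MathTypes.h"

void AxisAngleQuaternionExample()
{
    using namespace cvcore;
    const double kPi = std::acos(-1.0);

    AxisAngleRotation r;  // assumed default-constructible
    r.angle  = kPi / 2.0;
    r.axis.x = 0.0;
    r.axis.y = 0.0;
    r.axis.z = 1.0;

    Quaternion q = AxisAngleRotationToQuaternion(r);
    // qz = sin(angle / 2) ~= 0.7071, qw = cos(angle / 2) ~= 0.7071, qx = qy = 0.

    AxisAngleRotation back = QuaternionToAxisAngleRotation(q);
    // back.angle ~= pi / 2, back.axis ~= (0, 0, 1).
    (void)back;
}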
-// -// SPDX-License-Identifier: Apache-2.0 - -#include "cv/core/Tensor.h" - -#include - -#include -#include -#include -#include -#include - -namespace cvcore { - -TensorBase::TensorBase() - : m_data(nullptr) - , m_dimCount(0) - , m_type(U8) - , m_isOwning(false) - , m_isCPU(true) -{ - for (int i = 0; i < kMaxDimCount; ++i) - { - m_dimData[i] = {0, 0}; - } -} - -TensorBase::TensorBase(ChannelType type, const DimData *dimData, int dimCount, void *dataPtr, bool isCPU) - : TensorBase() -{ - assert(dimCount >= kMinDimCount && dimCount <= kMaxDimCount); - - m_isOwning = false; - m_isCPU = isCPU; - - m_type = type; - m_dimCount = dimCount; - for (int i = 0; i < dimCount; ++i) - { - m_dimData[i] = dimData[i]; - } - - m_data = dataPtr; -} - -TensorBase::TensorBase(ChannelType type, std::initializer_list dimData, void *dataPtr, bool isCPU) - : TensorBase(type, dimData.begin(), dimData.size(), dataPtr, isCPU) -{ -} - -TensorBase::TensorBase(ChannelType type, const DimData *dimData, int dimCount, bool isCPU) - : TensorBase(type, dimData, dimCount, nullptr, isCPU) -{ - m_isOwning = true; - - // compute tensor memory block size - const std::size_t tensorSize = getDataSize(); - - // allocate - if (isCPU) - { - m_data = std::malloc(tensorSize); - } - else - { - if (cudaMalloc(&m_data, tensorSize) != 0) - { - throw std::runtime_error("CUDA alloc() failed!"); - } - } -} - -TensorBase::TensorBase(ChannelType type, std::initializer_list dimData, bool isCPU) - : TensorBase(type, dimData.begin(), dimData.size(), isCPU) -{ -} - -TensorBase::~TensorBase() -{ - if (m_isOwning) - { - if (m_isCPU) - { - std::free(m_data); - } - else - { - cudaFree(m_data); - } - } -} - -TensorBase::TensorBase(TensorBase &&t) - : TensorBase() -{ - *this = std::move(t); -} - -TensorBase &TensorBase::operator=(TensorBase &&t) -{ - using std::swap; - - swap(m_data, t.m_data); - swap(m_dimCount, t.m_dimCount); - swap(m_type, t.m_type); - swap(m_isOwning, t.m_isOwning); - swap(m_isCPU, t.m_isCPU); - - for (int i = 0; i < kMaxDimCount; ++i) - { - swap(m_dimData[i], t.m_dimData[i]); - } - - return *this; -} - -int TensorBase::getDimCount() const -{ - return m_dimCount; -} - -std::size_t TensorBase::getSize(int dimIdx) const -{ - assert(dimIdx >= 0 && dimIdx < m_dimCount); - return m_dimData[dimIdx].size; -} - -std::size_t TensorBase::getStride(int dimIdx) const -{ - assert(dimIdx >= 0 && dimIdx < m_dimCount); - return m_dimData[dimIdx].stride; -} - -ChannelType TensorBase::getType() const -{ - return m_type; -} - -void *TensorBase::getData() const -{ - return m_data; -} - -std::size_t TensorBase::getDataSize() const -{ - std::size_t tensorSize = m_dimData[0].size * m_dimData[0].stride; - for (int i = 1; i < m_dimCount; ++i) - { - tensorSize = std::max(tensorSize, m_dimData[i].size * m_dimData[i].stride); - } - tensorSize *= GetChannelSize(m_type); - return tensorSize; -} - -bool TensorBase::isCPU() const -{ - return m_isCPU; -} - -bool TensorBase::isOwning() const -{ - return m_isOwning; -} - -std::string GetTensorLayoutAsString(TensorLayout TL) -{ - switch (TL) - { - case TensorLayout::CL: - return "CL"; - case TensorLayout::LC: - return "LC"; - case TensorLayout::HWC: - return "HWC"; - case TensorLayout::CHW: - return "CHW"; - case TensorLayout::DHWC: - return "DHWC"; - case TensorLayout::DCHW: - return "DCHW"; - case TensorLayout::CDHW: - return "CDHW"; - default: - throw std::runtime_error("Invalid TensorLayout"); - } -} - -std::string GetChannelCountAsString(ChannelCount CC) -{ - switch (CC) - { - case ChannelCount::C1: - return "C1"; 
- case ChannelCount::C2: - return "C2"; - case ChannelCount::C3: - return "C3"; - case ChannelCount::C4: - return "C4"; - case ChannelCount::CX: - return "CX"; - default: - throw std::runtime_error("Invalid ChannelCount"); - } -} - -std::string GetChannelTypeAsString(ChannelType CT) -{ - switch (CT) - { - case ChannelType::U8: - return "U8"; - case ChannelType::U16: - return "U16"; - case ChannelType::S8: - return "S8"; - case ChannelType::S16: - return "S16"; - case ChannelType::F16: - return "F16"; - case ChannelType::F32: - return "F32"; - case ChannelType::F64: - return "F64"; - default: - throw std::runtime_error("Invalid ChannelType"); - } -} - -std::size_t GetChannelSize(ChannelType CT) -{ - switch (CT) - { - case U8: - case S8: - return 1; - case F16: - case U16: - case S16: - return 2; - case F32: - return 4; - case F64: - return 8; - default: - throw std::runtime_error("Invalid ChannelType"); - } -} - -std::string GetMemoryTypeAsString(bool isCPU) -{ - return isCPU? "CPU" : "GPU"; -} - -} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/core/utility/CVError.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/core/utility/CVError.cpp deleted file mode 100644 index a6e62c7..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/core/utility/CVError.cpp +++ /dev/null @@ -1,123 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
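A worked example of the getDataSize() rule above (the largest size * stride over all dimensions, times the channel size), for a contiguous 3-channel HWC uint8 tensor of width 4 and height 3, with strides counted in elements:

// C: size 3, stride 1;  W: size 4, stride 3;  H: size 3, stride 12
// bytes = max(3 * 1, 4 * 3, 3 * 12) * GetChannelSize(U8) = 36 * 1 = 36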
-// -// SPDX-License-Identifier: Apache-2.0 - -#include "cv/core/CVError.h" - -#include -#include -#include - -#ifndef __cpp_lib_to_underlying -// Using a C++23 feature by hacking std -namespace std -{ - template - constexpr underlying_type_t to_underlying(Enum e) noexcept - { - return static_cast>(e); - } -}; -#endif // __cpp_lib_to_underlying - -namespace cvcore { - -namespace detail -{ - struct CoreErrorCategory : std::error_category - { - virtual const char * name() const noexcept override final - { - return "cvcore-error"; - } - - virtual std::string message(int value) const override final - { - std::string result; - - switch(value) - { - case std::to_underlying(ErrorCode::SUCCESS): - result = "(SUCCESS) No errors detected"; - break; - case std::to_underlying(ErrorCode::NOT_READY): - result = "(NOT_READY) The execution of the requested " - "operation is not to return"; - break; - case std::to_underlying(ErrorCode::NOT_IMPLEMENTED): - result = "(NOT_IMPLEMENTED) The requested operation is not " - "implemented"; - break; - case std::to_underlying(ErrorCode::INVALID_ARGUMENT): - result = "(INVALID_ARGUMENT) The argument provided to the " - "operation is not currently supported"; - break; - case std::to_underlying(ErrorCode::INVALID_IMAGE_FORMAT): - result = "(INVALID_IMAGE_FORMAT) The requested image format " - "is not supported by the operation"; - break; - case std::to_underlying(ErrorCode::INVALID_STORAGE_TYPE): - result = "(INVALID_STORAGE_TYPE) The requested storage type " - "is not supported by the operation"; - break; - case std::to_underlying(ErrorCode::INVALID_ENGINE_TYPE): - result = "(INVALID_ENGINE_TYPE) The requested engine type " - "is not supported by the operation"; - break; - case std::to_underlying(ErrorCode::INVALID_OPERATION): - result = "(INVALID_OPERATION) The requested operation is " - "not supported"; - break; - case std::to_underlying(ErrorCode::DETECTED_NAN_IN_RESULT): - result = "(DETECTED_NAN_IN_RESULT) NaN was detected in the " - "return value of the operation"; - break; - case std::to_underlying(ErrorCode::OUT_OF_MEMORY): - result = "(OUT_OF_MEMORY) The device has run out of memory"; - break; - case std::to_underlying(ErrorCode::DEVICE_ERROR): - result = "(DEVICE_ERROR) A device level error has been " - "encountered"; - break; - case std::to_underlying(ErrorCode::SYSTEM_ERROR): - result = "(SYSTEM_ERROR) A system level error has been " - "encountered"; - break; - default: - result = "(Unrecognized Condition) Value " + std::to_string(value) + - " does not map to known error code literal " + - " defined by cvcore::ErrorCode"; - break; - } - - return result; - } - }; -} // namespace detail - -const detail::CoreErrorCategory errorCategory{}; - -std::error_condition make_error_condition(ErrorCode ec) noexcept -{ - return {std::to_underlying(ec), errorCategory}; -} - -std::error_code make_error_code(ErrorCode ec) noexcept -{ - return {std::to_underlying(ec), errorCategory}; -} - -} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/core/utility/Instrumentation.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/core/utility/Instrumentation.cpp deleted file mode 100644 index 583b646..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/core/utility/Instrumentation.cpp +++ /dev/null @@ -1,95 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
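A short usage sketch for the error category defined above; the message text in the comment is the one produced by the switch above.

#include <system_error>
#include "cv/core/CVError.h"

void ErrorCodeSketch()
{
    std::error_code ec = cvcore::make_error_code(cvcore::ErrorCode::OUT_OF_MEMORY);
    // ec.message() == "(OUT_OF_MEMORY) The device has run out of memory"
    // ec.category().name() == "cvcore-error"
    (void)ec;
}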
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "cv/core/Instrumentation.h" - -#ifdef NVBENCH_ENABLE -#include -#include -#include -#endif - -namespace cvcore { namespace profiler { - -#ifdef NVBENCH_ENABLE -nv::bench::JsonHelper mapProfilerJsonOutputTypeToNvbenchType(ProfilerJsonOutputType jsonType) -{ - nv::bench::JsonHelper nvbenchJsonOutputType = nv::bench::JsonHelper::JSON_OFF; - if (jsonType == ProfilerJsonOutputType::JSON_OFF) - { - nvbenchJsonOutputType = nv::bench::JsonHelper::JSON_OFF; - } - else if (jsonType == ProfilerJsonOutputType::JSON_SEPARATE) - { - nvbenchJsonOutputType = nv::bench::JsonHelper::JSON_SEPARATE; - } - else if (jsonType == ProfilerJsonOutputType::JSON_AGGREGATE) - { - nvbenchJsonOutputType = nv::bench::JsonHelper::JSON_AGGREGATE; - } - return nvbenchJsonOutputType; -} -#endif - -void flush(const std::string& filename, ProfilerJsonOutputType jsonType) -{ -#ifdef NVBENCH_ENABLE - nv::bench::JsonHelper nvbenchJsonOutputType = mapProfilerJsonOutputTypeToNvbenchType(jsonType); - if (!filename.empty()) - { - nv::bench::Pool::instance().flushToFile(filename.c_str(), -1, INT_MAX, nvbenchJsonOutputType); - } - else - { - nv::bench::Pool::instance().flush(std::clog, -1, INT_MAX, nvbenchJsonOutputType); - } -#else - return; -#endif - -} - -void flush(std::ostream& output, ProfilerJsonOutputType jsonType) -{ -#ifdef NVBENCH_ENABLE - nv::bench::JsonHelper nvbenchJsonOutputType = mapProfilerJsonOutputTypeToNvbenchType(jsonType); - nv::bench::Pool::instance().flush(output, -1, INT_MAX, nvbenchJsonOutputType); -#else - return; -#endif -} - -void flush(ProfilerJsonOutputType jsonType) -{ -#ifdef NVBENCH_ENABLE - nv::bench::JsonHelper nvbenchJsonOutputType = mapProfilerJsonOutputTypeToNvbenchType(jsonType); - nv::bench::Pool::instance().flush(std::clog, -1, INT_MAX, nvbenchJsonOutputType); -#else - return; -#endif -} - -void clear() -{ -#ifdef NVBENCH_ENABLE - nv::bench::Pool::instance().clear(); -#else - return; -#endif -} - -}} diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/core/utility/Memory.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/core/utility/Memory.cpp deleted file mode 100644 index e75a614..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/core/utility/Memory.cpp +++ /dev/null @@ -1,124 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
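A one-line usage sketch for the profiler flush helpers above; the enum qualification is assumed from the surrounding namespace, and the call is a no-op unless built with NVBENCH_ENABLE, as the #else branches show.

#include "cv/core/Instrumentation.h"

void FlushProfileSketch()
{
    // Write aggregated timings to a JSON file (no-op without NVBENCH_ENABLE).
    cvcore::profiler::flush("profile.json",
                            cvcore::profiler::ProfilerJsonOutputType::JSON_AGGREGATE);
}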
-// -// SPDX-License-Identifier: Apache-2.0 - -#include "cv/core/Memory.h" - -#include - -#include -#include -#include - -namespace cvcore { - -namespace { - -// Copy 2D CPU pitch linear tensors -void Memcpy2DCPU(void *dst, size_t dstPitch, const void *src, size_t srcPitch, size_t widthInBytes, size_t height) -{ - uint8_t *dstPt = reinterpret_cast(dst); - const uint8_t *srcPt = reinterpret_cast(src); - for (size_t i = 0; i < height; i++) - { - memcpy(dstPt, srcPt, widthInBytes); - dstPt += dstPitch; - srcPt += srcPitch; - } -} - -} // anonymous namespace - -void TensorBaseCopy(TensorBase &dst, const TensorBase &src, cudaStream_t stream) -{ - if (dst.getDataSize() != src.getDataSize()) - { - throw std::runtime_error("Tensor stride mismatch!"); - } - assert(dst.getDimCount() == src.getDimCount()); - int dimCount = src.getDimCount(); - for (int i = 0; i < dimCount - 1; i++) - { - if (src.getStride(i) != src.getStride(i + 1) * src.getSize(i + 1) || - dst.getStride(i) != dst.getStride(i + 1) * dst.getSize(i + 1)) - { - throw std::runtime_error("Tensor is not contiguous in memory!"); - } - } - if (dst.isCPU() && src.isCPU()) - { - memcpy(dst.getData(), src.getData(), src.getDataSize()); - return; - } - cudaError_t error; - if (!dst.isCPU() && src.isCPU()) - { - error = cudaMemcpyAsync(dst.getData(), src.getData(), src.getDataSize(), cudaMemcpyHostToDevice, stream); - } - else if (dst.isCPU() && !src.isCPU()) - { - error = cudaMemcpyAsync(dst.getData(), src.getData(), src.getDataSize(), cudaMemcpyDeviceToHost, stream); - } - else - { - error = cudaMemcpyAsync(dst.getData(), src.getData(), src.getDataSize(), cudaMemcpyDeviceToDevice, stream); - } - if (error != cudaSuccess) - { - throw std::runtime_error("CUDA memcpy failed!"); - } -} - -void TensorBaseCopy2D(TensorBase &dst, const TensorBase &src, int dstPitch, int srcPitch, int widthInBytes, int height, - cudaStream_t stream) -{ - assert(dst.getDimCount() == src.getDimCount()); - int dimCount = src.getDimCount(); - for (int i = 0; i < dimCount; i++) - { - if (dst.getSize(i) != src.getSize(i)) - { - throw std::runtime_error("Tensor size mismatch!"); - } - } - if (dst.isCPU() && src.isCPU()) - { - Memcpy2DCPU(dst.getData(), dstPitch, src.getData(), srcPitch, widthInBytes, height); - return; - } - cudaError_t error; - if (!dst.isCPU() && src.isCPU()) - { - error = cudaMemcpy2DAsync(dst.getData(), dstPitch, src.getData(), srcPitch, widthInBytes, height, - cudaMemcpyHostToDevice, stream); - } - else if (dst.isCPU() && !src.isCPU()) - { - error = cudaMemcpy2DAsync(dst.getData(), dstPitch, src.getData(), srcPitch, widthInBytes, height, - cudaMemcpyDeviceToHost, stream); - } - else - { - error = cudaMemcpy2DAsync(dst.getData(), dstPitch, src.getData(), srcPitch, widthInBytes, height, - cudaMemcpyDeviceToDevice, stream); - } - if (error != cudaSuccess) - { - throw std::runtime_error("CUDA memcpy failed!"); - } -} - -} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/core/utility/ProfileUtils.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/core/utility/ProfileUtils.cpp deleted file mode 100644 index 53c5811..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/core/utility/ProfileUtils.cpp +++ /dev/null @@ -1,128 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
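A worked pitch example for TensorBaseCopy2D above: copying a 640 x 480, 3-channel uint8 image into a destination whose rows are padded to 2048 bytes.

// widthInBytes = 640 * 3 = 1920, srcPitch = 1920, dstPitch = 2048, height = 480
// Each of the 480 rows copies its 1920 payload bytes; the 128 trailing
// padding bytes of every destination row are left untouched.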
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include - -#include - -#include -#include -#include - -#if defined(_MSC_VER) || defined(__WIN32) -# include -# include -# include -#endif - -using json = nlohmann::json; - -namespace cvcore { - -namespace { - -#if defined(_MSC_VER) || defined(__WIN32) -std::string GetCPUName() -{ - // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2008/hskdteyh(v=vs.90)?redirectedfrom=MSDN - char CPUBrandString[0x40]; - int CPUInfo[4] = {-1}; - - // Calling __cpuid with 0x80000000 as the InfoType argument - // gets the number of valid extended IDs. - __cpuid(CPUInfo, 0x80000000); - unsigned i, nExIds = CPUInfo[0]; - memset(CPUBrandString, 0, sizeof(CPUBrandString)); - - // Get the information associated with each extended ID. - for (i=0x80000000; i<=nExIds; ++i) - { - __cpuid(CPUInfo, i); - - // Interpret CPU brand string and cache information. - if (i == 0x80000002) - memcpy(CPUBrandString, CPUInfo, sizeof(CPUInfo)); - else if (i == 0x80000003) - memcpy(CPUBrandString + 16, CPUInfo, sizeof(CPUInfo)); - else if (i == 0x80000004) - memcpy(CPUBrandString + 32, CPUInfo, sizeof(CPUInfo)); - } - return CPUBrandString; -} -#else -std::string GetCPUName() -{ - // TODO: this will only work on linux platform - std::ifstream cpuInfo("/proc/cpuinfo"); - if (!cpuInfo.good()) - { - throw std::runtime_error("unable to retrieve cpu info"); - } - std::string line; - while (std::getline(cpuInfo, line)) - { - int delimiterPos = line.find(':'); - if (delimiterPos != std::string::npos) - { - std::string key = line.substr(0, delimiterPos); - if (key.find("model name") != std::string::npos) - { - std::string info = line.substr(delimiterPos + 1); - info.erase(0, info.find_first_not_of(' ')); - return info; - } - } - } - return "CPU"; // default name if no cpu model name retrieved -} -#endif - -std::string GetGPUName() -{ - int deviceId; - cudaGetDevice(&deviceId); - cudaDeviceProp prop; - cudaError_t error = cudaGetDeviceProperties(&prop, deviceId); - if (error != 0) - { - throw std::runtime_error("unable to retrieve cuda device info"); - } - return std::string(prop.name); -} - -} // anonymous namespace - -void ExportToJson(const std::string outputPath, const std::string taskName, float tMin, float tMax, float tAvg, - bool isCPU, int iterations = 100) -{ - std::ifstream in(outputPath); - json jsonHandler; - if (in.good()) - { - in >> jsonHandler; - } - in.close(); - - const std::string platform = isCPU ? 
"CPU: " + GetCPUName() : "GPU: " + GetGPUName(); - jsonHandler[platform][taskName] = {{"iter", iterations}, {"min", tMin}, {"max", tMax}, {"avg", tAvg}}; - - std::ofstream out(outputPath); - out << std::setw(4) << jsonHandler << std::endl; - out.close(); -} - -} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTInferencer.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTInferencer.cpp deleted file mode 100644 index db5a0d0..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTInferencer.cpp +++ /dev/null @@ -1,275 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#include "TensorRTInferencer.h" -#include -#include -#include "TensorRTUtils.h" - -#include "cv/inferencer/Errors.h" -#include "cv/inferencer/IInferenceBackend.h" -#include "cv/inferencer/Inferencer.h" - -namespace cvcore { namespace inferencer { - -namespace { -size_t getDataSize(const std::vector &shape, cvcore::ChannelType dataType) -{ - size_t layerShape = 1; - for (size_t k = 0; k < shape.size(); k++) - layerShape *= shape[k] <= 0 ? 
1 : shape[k]; - - return layerShape * GetChannelSize(dataType); -} -} // namespace - -std::error_code TensorRTInferencer::getLayerInfo(LayerInfo &layer, std::string layerName) -{ - layer.name = layerName; - layer.index = m_inferenceEngine->getBindingIndex(layerName.c_str()); - auto dim = m_inferenceEngine->getBindingDimensions(layer.index); - nvinfer1::TensorFormat tensorFormat = m_inferenceEngine->getBindingFormat(layer.index); - - std::error_code err; - err = getCVCoreChannelLayoutFromTensorRT(layer.layout, tensorFormat); - if (err != cvcore::make_error_code(ErrorCode::SUCCESS)) - { - return ErrorCode::INVALID_ARGUMENT; - } - - for (size_t cnt = 0; cnt < dim.nbDims; cnt++) - { - layer.shape.push_back(dim.d[cnt]); - } - - err = getCVCoreChannelTypeFromTensorRT(layer.dataType, m_inferenceEngine->getBindingDataType(layer.index)); - layer.layerSize = getDataSize(layer.shape, layer.dataType); - if (err != cvcore::make_error_code(ErrorCode::SUCCESS)) - { - return ErrorCode::INVALID_ARGUMENT; - } - - return ErrorCode::SUCCESS; -} - -std::error_code TensorRTInferencer::ParseTRTModel() -{ - - m_modelInfo.modelName = m_inferenceEngine->getName(); - m_modelInfo.modelVersion = ""; - m_modelInfo.maxBatchSize = m_maxBatchSize; - std::error_code err; - for (size_t i = 0; i < m_inputLayers.size(); i++) - { - LayerInfo layer; - err = getLayerInfo(layer, m_inputLayers[i]); - if (err != cvcore::make_error_code(cvcore::ErrorCode::SUCCESS)) - { - return err; - } - m_modelInfo.inputLayers[layer.name] = layer; - } - for (size_t i = 0; i < m_outputLayers.size(); i++) - { - LayerInfo layer; - err = getLayerInfo(layer, m_outputLayers[i]); - if (err != cvcore::make_error_code(cvcore::ErrorCode::SUCCESS)) - { - return err; - } - m_modelInfo.outputLayers[layer.name] = layer; - } - - return ErrorCode::SUCCESS; -} - -TensorRTInferencer::TensorRTInferencer(const TensorRTInferenceParams ¶ms) - : m_logger(new TRTLogger()) - , m_maxBatchSize(params.maxBatchSize) - , m_inputLayers(params.inputLayerNames) - , m_outputLayers(params.outputLayerNames) - , m_cudaStream(0) - , m_inferenceEngine(nullptr) -{ - - if (params.inferType == TRTInferenceType::TRT_ENGINE) - { - std::ifstream trtModelFStream(params.engineFilePath, std::ios::binary); - std::unique_ptr trtModelContent; - size_t trtModelContentSize = 0; - - if (!trtModelFStream.good()) - { - throw ErrorCode::INVALID_ARGUMENT; - } - else - { - trtModelFStream.seekg(0, trtModelFStream.end); - trtModelContentSize = trtModelFStream.tellg(); - trtModelFStream.seekg(0, trtModelFStream.beg); - trtModelContent.reset(new char[trtModelContentSize]); - trtModelFStream.read(trtModelContent.get(), trtModelContentSize); - trtModelFStream.close(); - } - - m_inferenceRuntime.reset(nvinfer1::createInferRuntime(*(m_logger.get()))); - if (params.dlaID != -1 && params.dlaID < m_inferenceRuntime->getNbDLACores()) - { - m_inferenceRuntime->setDLACore(params.dlaID); - } - m_inferenceEngine = m_inferenceRuntime->deserializeCudaEngine(trtModelContent.get(), trtModelContentSize); - m_ownedInferenceEngine.reset(m_inferenceEngine); - m_inferenceContext.reset(m_inferenceEngine->createExecutionContext()); - m_inferenceContext->setOptimizationProfileAsync(0, m_cudaStream); - } - else - { - if (params.engine == nullptr) - { - throw ErrorCode::INVALID_ARGUMENT; - } - m_inferenceEngine = params.engine; - m_inferenceContext.reset(m_inferenceEngine->createExecutionContext()); - } - - if (m_inferenceEngine == nullptr || m_inferenceContext == nullptr) - { - throw ErrorCode::INVALID_ARGUMENT; - } - - 
m_hasImplicitBatch = m_inferenceEngine->hasImplicitBatchDimension(); - m_bindingsCount = m_inferenceEngine->getNbBindings(); - if (!m_hasImplicitBatch) - { - for (size_t i = 0; i < m_bindingsCount; i++) - { - if (m_inferenceEngine->bindingIsInput(i)) - { - nvinfer1::Dims dims_i(m_inferenceEngine->getBindingDimensions(i)); - nvinfer1::Dims4 inputDims{1, dims_i.d[1], dims_i.d[2], dims_i.d[3]}; - m_inferenceContext->setBindingDimensions(i, inputDims); - } - } - } - std::error_code err; - err = ParseTRTModel(); - if (err != cvcore::make_error_code(ErrorCode::SUCCESS)) - { - throw err; - } - m_buffers.resize(m_bindingsCount); -} - -// Set input layer tensor -std::error_code TensorRTInferencer::setInput(const cvcore::TensorBase &trtInputBuffer, std::string inputLayerName) -{ - if (m_modelInfo.inputLayers.find(inputLayerName) == m_modelInfo.inputLayers.end()) - { - return ErrorCode::INVALID_ARGUMENT; - } - LayerInfo layer = m_modelInfo.inputLayers[inputLayerName]; - m_buffers[layer.index] = trtInputBuffer.getData(); - return ErrorCode::SUCCESS; -} - -// Sets output layer tensor -std::error_code TensorRTInferencer::setOutput(cvcore::TensorBase &trtOutputBuffer, std::string outputLayerName) -{ - if (m_modelInfo.outputLayers.find(outputLayerName) == m_modelInfo.outputLayers.end()) - { - return ErrorCode::INVALID_ARGUMENT; - } - LayerInfo layer = m_modelInfo.outputLayers[outputLayerName]; - m_buffers[layer.index] = trtOutputBuffer.getData(); - return ErrorCode::SUCCESS; -} - -// Get the model metadata parsed based on the model file -// This would be done in initialize call itself. User can access the modelMetaData created using this API. -ModelMetaData TensorRTInferencer::getModelMetaData() const -{ - return m_modelInfo; -} - -std::error_code TensorRTInferencer::infer(size_t batchSize) -{ - bool err = true; - if (!m_hasImplicitBatch) - { - size_t bindingsCount = m_inferenceEngine->getNbBindings(); - for (size_t i = 0; i < bindingsCount; i++) - { - if (m_inferenceEngine->bindingIsInput(i)) - { - nvinfer1::Dims dims_i(m_inferenceEngine->getBindingDimensions(i)); - nvinfer1::Dims4 inputDims{static_cast(batchSize), dims_i.d[1], dims_i.d[2], dims_i.d[3]}; - m_inferenceContext->setBindingDimensions(i, inputDims); - } - } - err = m_inferenceContext->enqueueV2(&m_buffers[0], m_cudaStream, nullptr); - } - else - { - err = m_inferenceContext->enqueue(m_maxBatchSize, &m_buffers[0], m_cudaStream, nullptr); - } - if (!err) - { - return InferencerErrorCode::TENSORRT_INFERENCE_ERROR; - } - return ErrorCode::SUCCESS; -} - -// Applicable only for Native TRT -std::error_code TensorRTInferencer::setCudaStream(cudaStream_t cudaStream) // Only in TRT -{ - m_cudaStream = cudaStream; - return ErrorCode::SUCCESS; -} - -std::error_code TensorRTInferencer::unregister(std::string layerName) -{ - size_t index; - if (m_modelInfo.outputLayers.find(layerName) != m_modelInfo.outputLayers.end()) - { - index = m_modelInfo.outputLayers[layerName].index; - } - else if (m_modelInfo.inputLayers.find(layerName) != m_modelInfo.inputLayers.end()) - { - index = m_modelInfo.inputLayers[layerName].index; - } - else - { - return ErrorCode::INVALID_ARGUMENT; - } - m_buffers[index] = nullptr; - return ErrorCode::SUCCESS; -} - -std::error_code TensorRTInferencer::unregister() -{ - for (size_t i = 0; i < m_buffers.size(); i++) - { - m_buffers[i] = nullptr; - } - return ErrorCode::SUCCESS; -} - -TensorRTInferencer::~TensorRTInferencer() -{ - m_buffers.clear(); -} - -}} // namespace cvcore::inferencer diff --git 
a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTUtils.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTUtils.cpp deleted file mode 100644 index 3a1696a..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTUtils.cpp +++ /dev/null @@ -1,64 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "TensorRTUtils.h" -#include - -namespace cvcore { namespace inferencer { - -std::error_code getCVCoreChannelTypeFromTensorRT(cvcore::ChannelType &channelType, nvinfer1::DataType dtype) -{ - if (dtype == nvinfer1::DataType::kINT8) - { - channelType = cvcore::ChannelType::U8; - } - else if (dtype == nvinfer1::DataType::kHALF) - { - channelType = cvcore::ChannelType::F16; - } - else if (dtype == nvinfer1::DataType::kFLOAT) - { - channelType = cvcore::ChannelType::F32; - } - else - { - return ErrorCode::INVALID_OPERATION; - } - - return ErrorCode::SUCCESS; -} - -std::error_code getCVCoreChannelLayoutFromTensorRT(cvcore::TensorLayout &channelLayout, - nvinfer1::TensorFormat tensorFormat) -{ - if (tensorFormat == nvinfer1::TensorFormat::kLINEAR) - { - channelLayout = cvcore::TensorLayout::NCHW; - } - else if (tensorFormat == nvinfer1::TensorFormat::kHWC) - { - channelLayout = cvcore::TensorLayout::HWC; - } - else - { - return ErrorCode::INVALID_OPERATION; - } - - return ErrorCode::SUCCESS; -} - -}} // namespace cvcore::inferencer diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/triton/TritonUtils.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/triton/TritonUtils.cpp deleted file mode 100644 index 47b3653..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/triton/TritonUtils.cpp +++ /dev/null @@ -1,84 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
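The TensorRTUtils.cpp hunk above removes the helpers that translate nvinfer1::DataType and nvinfer1::TensorFormat into cvcore enums. A sketch of the same dtype mapping, written against a standalone ChannelType enum rather than the cvcore one (an assumption for illustration), could look like:

    #include <NvInfer.h>
    #include <optional>

    // Hypothetical standalone mapping mirroring the removed getCVCoreChannelTypeFromTensorRT;
    // precisions the removed helper did not handle are reported as std::nullopt.
    enum class ChannelType { U8, F16, F32 };

    std::optional<ChannelType> ToChannelType(nvinfer1::DataType dtype)
    {
        switch (dtype)
        {
            case nvinfer1::DataType::kINT8:  return ChannelType::U8;
            case nvinfer1::DataType::kHALF:  return ChannelType::F16;
            case nvinfer1::DataType::kFLOAT: return ChannelType::F32;
            default:                         return std::nullopt;  // e.g. kINT32, kBOOL
        }
    }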
-// -// SPDX-License-Identifier: Apache-2.0 -#ifdef ENABLE_TRITON -#include "TritonUtils.h" -#include - -namespace cvcore { namespace inferencer { - -bool getCVCoreChannelType(cvcore::ChannelType &channelType, std::string dtype) -{ - if (dtype.compare("UINT8") == 0) - { - channelType = cvcore::ChannelType::U8; - } - else if (dtype.compare("UINT16") == 0) - { - channelType = cvcore::ChannelType::U16; - } - else if (dtype.compare("FP16") == 0) - { - channelType = cvcore::ChannelType::F16; - } - else if (dtype.compare("FP32") == 0) - { - channelType = cvcore::ChannelType::F32; - } - else if (dtype.compare("FP64") == 0) - { - channelType = cvcore::ChannelType::F64; - } - else - { - return false; - } - - return true; -} - -bool getTritonChannelType(std::string &dtype, cvcore::ChannelType channelType) -{ - if (channelType == cvcore::ChannelType::U8) - { - dtype = "UINT8"; - } - else if (channelType == cvcore::ChannelType::U16) - { - dtype = "UINT16"; - } - else if (channelType == cvcore::ChannelType::F16) - { - dtype = "FP16"; - } - else if (channelType == cvcore::ChannelType::F32) - { - dtype = "FP32"; - } - else if (channelType == cvcore::ChannelType::F64) - { - dtype = "FP64"; - } - else - { - return false; - } - - return true; -} - -}} // namespace cvcore::inferencer -#endif diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/ArithmeticOperations.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/ArithmeticOperations.cpp deleted file mode 100644 index 85d5e2f..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/ArithmeticOperations.cpp +++ /dev/null @@ -1,329 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
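The TritonUtils.cpp hunk above removes the two-way mapping between Triton dtype strings ("UINT8", "FP16", "FP32", ...) and cvcore channel types, implemented as if/else chains over string compares. A table-driven sketch of the same forward mapping, again with a placeholder ChannelType enum, might be:

    #include <optional>
    #include <string>
    #include <unordered_map>

    enum class ChannelType { U8, U16, F16, F32, F64 };

    // Hypothetical lookup-table equivalent of the removed string-compare chain.
    std::optional<ChannelType> FromTritonDType(const std::string &dtype)
    {
        static const std::unordered_map<std::string, ChannelType> kMap = {
            {"UINT8", ChannelType::U8},  {"UINT16", ChannelType::U16},
            {"FP16",  ChannelType::F16}, {"FP32",   ChannelType::F32},
            {"FP64",  ChannelType::F64}};
        const auto it = kMap.find(dtype);
        return it == kMap.end() ? std::nullopt : std::optional<ChannelType>(it->second);
    }

The reverse mapping in the removed code mirrors this with the comparison direction swapped.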
-// -// SPDX-License-Identifier: Apache-2.0 - -#include "NppUtils.h" - -#include "cv/tensor_ops/ImageUtils.h" - -#include "cv/core/Memory.h" - -#include -#include - -#include -#include -#include - -namespace cvcore { namespace tensor_ops { - -namespace { - -static void NormalizeTensorC3F32Inplace(Tensor &src, const float scale[3], const float offset[3], - NppStreamContext streamContext) -{ - const int srcW = src.getWidth(); - const int srcH = src.getHeight(); - const NppiSize srcSize = {srcW, srcH}; - - const Npp32f offsets[3] = {static_cast(offset[0]), static_cast(offset[1]), - static_cast(offset[2])}; - NppStatus status = - nppiAddC_32f_C3IR_Ctx(offsets, static_cast(src.getData()), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), srcSize, streamContext); - assert(status == NPP_SUCCESS); - - const Npp32f scales[3] = {static_cast(scale[0]), static_cast(scale[1]), - static_cast(scale[2])}; - status = nppiMulC_32f_C3IR_Ctx(scales, static_cast(src.getData()), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), srcSize, streamContext); - assert(status == NPP_SUCCESS); -} - -template -static void NormalizeTensorC1F32Inplace(Tensor &src, const float scale, const float offset, - NppStreamContext streamContext) -{ - const int srcW = src.getWidth(); - const int srcH = src.getHeight(); - const NppiSize srcSize = {srcW, srcH}; - - NppStatus status = - nppiAddC_32f_C1IR_Ctx(static_cast(offset), static_cast(src.getData()), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), srcSize, streamContext); - assert(status == NPP_SUCCESS); - - status = nppiMulC_32f_C1IR_Ctx(static_cast(scale), static_cast(src.getData()), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), srcSize, streamContext); - assert(status == NPP_SUCCESS); -} - -template -void NormalizeC1U8Impl(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - NppStreamContext streamContext = GetNppStreamContext(stream); - - NppStatus status = nppiConvert_8u32f_C1R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {int(src.getWidth()), int(src.getHeight())}, streamContext); - assert(status == NPP_SUCCESS); - - NormalizeTensorC1F32Inplace(dst, scale, offset, streamContext); -} - -template -void NormalizeC1U16Impl(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - NppStreamContext streamContext = GetNppStreamContext(stream); - - NppStatus status = nppiConvert_16u32f_C1R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {int(src.getWidth()), int(src.getHeight())}, streamContext); - assert(status == NPP_SUCCESS); - - NormalizeTensorC1F32Inplace(dst, scale, offset, streamContext); -} - -template -void NormalizeC1F32Impl(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() 
== dst.getHeight())); - - Copy(dst, src, stream); - NormalizeTensorC1F32Inplace(dst, scale, offset, GetNppStreamContext(stream)); -} - -template -void NormalizeC3Batch(Tensor &dst, Tensor &src, const float scale[3], - const float offset[3], cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert(src.getDepth() == dst.getDepth()); - - for (int i = 0; i < src.getDepth(); i++) - { - size_t shiftSrc = i * src.getStride(TensorDimension::DEPTH); - size_t shiftDst = i * dst.getStride(TensorDimension::DEPTH); - Tensor srcTmp(src.getWidth(), src.getHeight(), - src.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - src.getData() + shiftSrc, false); - Tensor dstTmp(dst.getWidth(), dst.getHeight(), - dst.getStride(TensorDimension::HEIGHT) * GetChannelSize(F32), - dst.getData() + shiftDst, false); - Normalize(dstTmp, srcTmp, scale, offset, stream); - } -} - -template -void NormalizeC1Batch(Tensor &dst, Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert(src.getDepth() == dst.getDepth()); - - for (int i = 0; i < src.getDepth(); i++) - { - size_t shiftSrc = i * src.getStride(TensorDimension::DEPTH); - size_t shiftDst = i * dst.getStride(TensorDimension::DEPTH); - Tensor srcTmp(src.getWidth(), src.getHeight(), - src.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - src.getData() + shiftSrc, false); - Tensor dstTmp(dst.getWidth(), dst.getHeight(), - dst.getStride(TensorDimension::HEIGHT) * GetChannelSize(F32), - dst.getData() + shiftDst, false); - Normalize(dstTmp, srcTmp, scale, offset, stream); - } -} - -template -void NormalizeC1Batch(Tensor &dst, Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert(src.getDepth() == dst.getDepth()); - - for (int i = 0; i < src.getDepth(); i++) - { - size_t shiftSrc = i * src.getStride(TensorDimension::DEPTH); - size_t shiftDst = i * dst.getStride(TensorDimension::DEPTH); - Tensor srcTmp(src.getWidth(), src.getHeight(), - src.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - src.getData() + shiftSrc, false); - Tensor dstTmp(dst.getWidth(), dst.getHeight(), - dst.getStride(TensorDimension::HEIGHT) * GetChannelSize(F32), - dst.getData() + shiftDst, false); - Normalize(dstTmp, srcTmp, scale, offset, stream); - } -} - -} // anonymous namespace - -void Normalize(Tensor &dst, const Tensor &src, const float scale[3], const float offset[3], - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - NppStreamContext streamContext = GetNppStreamContext(stream); - - NppStatus status = nppiConvert_8u32f_C3R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {int(src.getWidth()), int(src.getHeight())}, streamContext); - assert(status == NPP_SUCCESS); - - NormalizeTensorC3F32Inplace(dst, scale, offset, streamContext); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale[3], const float offset[3], - cudaStream_t stream) -{ - NormalizeC3Batch(dst, const_cast &>(src), scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale[3], const float offset[3], - cudaStream_t stream) -{ 
- // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - NppStreamContext streamContext = GetNppStreamContext(stream); - - NppStatus status = nppiConvert_16u32f_C3R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {int(src.getWidth()), int(src.getHeight())}, streamContext); - assert(status == NPP_SUCCESS); - - NormalizeTensorC3F32Inplace(dst, scale, offset, streamContext); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale[3], - const float offset[3], cudaStream_t stream) -{ - NormalizeC3Batch(dst, const_cast &>(src), scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale[3], const float offset[3], - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - Copy(dst, src, stream); - NormalizeTensorC3F32Inplace(dst, scale, offset, GetNppStreamContext(stream)); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale[3], - const float offset[3], cudaStream_t stream) -{ - NormalizeC3Batch(dst, const_cast &>(src), scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - NormalizeC1U8Impl(dst, src, scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - NormalizeC1Batch(dst, const_cast &>(src), scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - NormalizeC1U16Impl(dst, src, scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - NormalizeC1Batch(dst, const_cast &>(src), scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - NormalizeC1F32Impl(dst, src, scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - NormalizeC1Batch(dst, const_cast &>(src), scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - NormalizeC1U8Impl(dst, src, scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - NormalizeC1Batch(dst, const_cast &>(src), scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - NormalizeC1U16Impl(dst, src, scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - NormalizeC1Batch(dst, const_cast &>(src), scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - NormalizeC1F32Impl(dst, src, scale, offset, stream); -} - -void Normalize(Tensor &dst, const Tensor &src, const float scale, const float offset, - cudaStream_t stream) -{ - NormalizeC1Batch(dst, const_cast &>(src), scale, offset, stream); -} - 
-}} // namespace cvcore::tensor_ops \ No newline at end of file diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/BBoxUtils.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/BBoxUtils.cpp deleted file mode 100644 index 4559a35..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/BBoxUtils.cpp +++ /dev/null @@ -1,173 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "cv/tensor_ops/BBoxUtils.h" - -#include -#include - -namespace cvcore { namespace tensor_ops { - -namespace { - -bool IsValid(const BBox &box) -{ - return box.xmin >= 0 && box.ymin >= 0 && box.xmin < box.xmax && box.ymin < box.ymax; -} - -} // anonymous namespace - -float GetArea(const BBox &box) -{ - if (box.xmax < box.xmin || box.ymax < box.ymin) - { - return 0.f; - } - return static_cast((box.xmax - box.xmin) * (box.ymax - box.ymin)); -} - -float GetIntersection(const BBox &a, const BBox &b) -{ - const int lowerX = std::max(a.xmin, b.xmin); - const int upperX = std::min(a.xmax, b.xmax); - const int lowerY = std::max(a.ymin, b.ymin); - const int upperY = std::min(a.ymax, b.ymax); - const int diffX = lowerX < upperX ? upperX - lowerX : 0; - const int diffY = lowerY < upperY ? upperY - lowerY : 0; - return static_cast(diffX * diffY); -} - -float GetUnion(const BBox &a, const BBox &b) -{ - return GetArea(a) + GetArea(b) - GetIntersection(a, b); -} - -float GetIoU(const BBox &a, const BBox &b) -{ - return GetIntersection(a, b) / GetUnion(a, b); -} - -BBox MergeBoxes(const BBox &a, const BBox &b) -{ - if (!IsValid(a) || !IsValid(b)) - { - return IsValid(a) ? 
a : b; - } - BBox res; - res.xmin = std::min(a.xmin, b.xmin); - res.xmax = std::max(a.xmax, b.xmax); - res.ymin = std::min(a.ymin, b.ymin); - res.ymax = std::max(a.ymax, b.ymax); - return res; -} - -BBox ClampBox(const BBox &a, const BBox &b) -{ - return {std::max(a.xmin, b.xmin), std::max(a.ymin, b.ymin), std::min(a.xmax, b.xmax), std::min(a.ymax, b.ymax)}; -} - -BBox InterpolateBoxes(float currLeft, float currRight, float currBottom, float currTop, float xScaler, float yScaler, - int currColumn, int currRow, BBoxInterpolationType type, float bboxNorm) -{ - BBox currBoxInfo; - if (type == CONST_INTERPOLATION) - { - float centerX = ((currColumn * xScaler + 0.5) / bboxNorm); - float centerY = ((currRow * yScaler + 0.5) / bboxNorm); - float left = (currLeft - centerX); - float right = (currRight + centerX); - float top = (currTop - centerY); - float bottom = (currBottom + centerY); - currBoxInfo.xmin = left * -bboxNorm; - currBoxInfo.xmax = right * bboxNorm; - currBoxInfo.ymin = top * -bboxNorm; - currBoxInfo.ymax = bottom * bboxNorm; - } - else if (type == IMAGE_INTERPOLATION) - { - int centerX = (int)((currColumn + 0.5f) * xScaler); - int centerY = (int)((currRow + 0.5f) * yScaler); - int left = (int)(currLeft * xScaler); - int right = (int)(currRight * xScaler); - int top = (int)(currTop * yScaler); - int bottom = (int)(currBottom * yScaler); - currBoxInfo.xmin = centerX - left; - currBoxInfo.xmax = centerX + right; - currBoxInfo.ymin = centerY - top; - currBoxInfo.ymax = centerY + bottom; - } - else - { - throw std::runtime_error("invalid bbox interpolation type"); - } - return currBoxInfo; -} - -BBox ScaleBox(const BBox &bbox, float xScaler, float yScaler, BBoxScaleType type) -{ - BBox output; - if (type == NORMAL) - { - int xMin = (int)(bbox.xmin * xScaler + 0.5f); - int yMin = (int)(bbox.ymin * yScaler + 0.5f); - int xMax = (int)(bbox.xmax * xScaler + 0.5f); - int yMax = (int)(bbox.ymax * yScaler + 0.5f); - output = {xMin, yMin, xMax, yMax}; - } - else if (type == CENTER) - { - float xCenter = (bbox.xmax + bbox.xmin) / 2.0f; - float yCenter = (bbox.ymax + bbox.ymin) / 2.0f; - - float width = (bbox.xmax - bbox.xmin) * xScaler; - float height = (bbox.ymax - bbox.ymin) * yScaler; - - output = {int(xCenter - width / 2 + 0.5f), int(yCenter - height / 2 + 0.5f), int(xCenter + width / 2 + 0.5f), - int(yCenter + height / 2 + 0.5f)}; - } - else - { - throw std::runtime_error("invalid bbox scaling type"); - } - return output; -} - -BBox TransformBox(const BBox &bbox, float xScaler, float yScaler, float xOffset, float yOffset) -{ - int xMin = (int)((bbox.xmin + xOffset) * xScaler + 0.5f); - int yMin = (int)((bbox.ymin + yOffset) * yScaler + 0.5f); - int xMax = (int)((bbox.xmax + xOffset) * xScaler + 0.5f); - int yMax = (int)((bbox.ymax + yOffset) * yScaler + 0.5f); - return {xMin, yMin, xMax, yMax}; -} - -BBox SquarifyBox(const BBox &box, const BBox &boundary, float scale) -{ - BBox output = ClampBox(box, boundary); - float updateWH = scale * std::max(output.xmax - output.xmin, output.ymax - output.ymin); - float scaleW = updateWH / float(output.xmax - output.xmin); - float scaleH = updateWH / float(output.ymax - output.ymin); - output = ScaleBox(output, scaleW, scaleH, CENTER); - - output = ClampBox(output, boundary); - int xmin = output.xmin; - int ymin = output.ymin; - int l = std::min(output.xmax - output.xmin, output.ymax - output.ymin); - return {xmin, ymin, xmin + l, ymin + l}; -} - -}} // namespace cvcore::tensor_ops diff --git 
a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/ColorConversions.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/ColorConversions.cpp deleted file mode 100644 index f02d3a9..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/ColorConversions.cpp +++ /dev/null @@ -1,447 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "NppUtils.h" - -#include "cv/tensor_ops/ImageUtils.h" - -#include "cv/core/Memory.h" - -#include -#include -#include - -#include -#include -#include - -namespace cvcore { namespace tensor_ops { - -const float BGR2GRAY_COEFFS[3] = {0.114f, 0.587f, 0.299f}; -const float RGB2GRAY_COEFFS[3] = {0.299f, 0.587f, 0.114f}; - -namespace { - -template -void ConvertColorFormatBatch(Tensor &dst, Tensor &src, ColorConversionType type, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert(src.getDepth() == dst.getDepth()); - - for (int i = 0; i < src.getDepth(); i++) - { - size_t offsetSrc = i * src.getStride(TensorDimension::DEPTH); - size_t offsetDst = i * dst.getStride(TensorDimension::DEPTH); - Tensor srcTmp(src.getWidth(), src.getHeight(), - src.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - src.getData() + offsetSrc, false); - Tensor dstTmp(dst.getWidth(), dst.getHeight(), - dst.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - dst.getData() + offsetDst, false); - ConvertColorFormat(dstTmp, srcTmp, type, stream); - } -} - -template -void InterleavedToPlanarBatch(Tensor &dst, Tensor &src, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert(src.getDepth() == dst.getDepth()); - - for (int i = 0; i < src.getDepth(); i++) - { - size_t offsetSrc = i * src.getStride(TensorDimension::DEPTH); - size_t offsetDst = i * dst.getStride(TensorDimension::DEPTH); - Tensor srcTmp(src.getWidth(), src.getHeight(), - src.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - src.getData() + offsetSrc, false); - Tensor dstTmp(dst.getWidth(), dst.getHeight(), - dst.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - dst.getData() + offsetDst, false); - InterleavedToPlanar(dstTmp, srcTmp, stream); - } -} - -} // anonymous namespace - -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - if (type == BGR2RGB || type == RGB2BGR) - { - const int order[3] = {2, 1, 0}; - NppStatus status = nppiSwapChannels_8u_C3R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {int(src.getWidth()), int(src.getHeight())}, 
order, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); - } - else - { - throw std::runtime_error("invalid color conversion type"); - } -} - -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream) -{ - ConvertColorFormatBatch(dst, const_cast &>(src), type, stream); -} - -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - if (type == BGR2RGB || type == RGB2BGR) - { - const int order[3] = {2, 1, 0}; - NppStatus status = nppiSwapChannels_16u_C3R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {int(src.getWidth()), int(src.getHeight())}, order, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); - } - else - { - throw std::runtime_error("invalid color conversion type"); - } -} - -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream) -{ - ConvertColorFormatBatch(dst, const_cast &>(src), type, stream); -} - -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - if (type == BGR2RGB || type == RGB2BGR) - { - const int order[3] = {2, 1, 0}; - NppStatus status = nppiSwapChannels_32f_C3R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {int(src.getWidth()), int(src.getHeight())}, order, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); - } - else - { - throw std::runtime_error("invalid color conversion type"); - } -} - -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream) -{ - ConvertColorFormatBatch(dst, const_cast &>(src), type, stream); -} - -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - if (type == BGR2GRAY || type == RGB2GRAY) - { - NppStatus status = nppiColorToGray_8u_C3C1R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {int(src.getWidth()), int(src.getHeight())}, type == BGR2GRAY ? 
BGR2GRAY_COEFFS : RGB2GRAY_COEFFS, - GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); - } - else - { - throw std::runtime_error("invalid color conversion type"); - } -} - -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - if (type == BGR2GRAY || type == RGB2GRAY) - { - NppStatus status = nppiColorToGray_16u_C3C1R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {int(src.getWidth()), int(src.getHeight())}, type == BGR2GRAY ? BGR2GRAY_COEFFS : RGB2GRAY_COEFFS, - GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); - } - else - { - throw std::runtime_error("invalid color conversion type"); - } -} - -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - if (type == BGR2GRAY || type == RGB2GRAY) - { - NppStatus status = nppiColorToGray_32f_C3C1R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {int(src.getWidth()), int(src.getHeight())}, type == BGR2GRAY ? BGR2GRAY_COEFFS : RGB2GRAY_COEFFS, - GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); - } - else - { - throw std::runtime_error("invalid color conversion type"); - } -} - -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - if (type == GRAY2BGR || type == GRAY2RGB) - { - NppStatus status = nppiDup_8u_C1C3R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {int(src.getWidth()), int(src.getHeight())}, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); - } - else - { - throw std::runtime_error("invalid color conversion type"); - } -} - -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - if (type == GRAY2BGR || type == GRAY2RGB) - { - NppStatus status = nppiDup_16u_C1C3R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {int(src.getWidth()), int(src.getHeight())}, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); - } - else - { - throw std::runtime_error("invalid color conversion type"); - } -} - -void ConvertColorFormat(Tensor &dst, const Tensor &src, ColorConversionType type, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - if (type == GRAY2BGR || type 
== GRAY2RGB) - { - NppStatus status = nppiDup_32f_C1C3R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {int(src.getWidth()), int(src.getHeight())}, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); - } - else - { - throw std::runtime_error("invalid color conversion type"); - } -} - -void ConvertBitDepth(Tensor &dst, Tensor &src, const float scale, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - const NppiSize srcSize = {src.getWidth(), src.getHeight()}; - - NppStreamContext streamContext = GetNppStreamContext(stream); - - NppStatus status = - nppiMulC_32f_C1IR_Ctx(static_cast(scale), static_cast(src.getData()), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), srcSize, streamContext); - assert(status == NPP_SUCCESS); - - status = nppiConvert_32f8u_C1R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {int(src.getWidth()), int(src.getHeight())}, NPP_RND_FINANCIAL, streamContext); - assert(status == NPP_SUCCESS); -} - -void ConvertBitDepth(Tensor &dst, Tensor &src, const float scale, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert(src.getDepth() == dst.getDepth()); - - Tensor srcTmp(src.getWidth(), src.getDepth() * src.getHeight(), - src.getStride(TensorDimension::HEIGHT) * GetChannelSize(F32), src.getData(), false); - Tensor dstTmp(dst.getWidth(), dst.getDepth() * dst.getHeight(), - dst.getStride(TensorDimension::HEIGHT) * GetChannelSize(U8), dst.getData(), false); - ConvertBitDepth(dstTmp, srcTmp, scale, stream); -} - -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - NppStatus status; - NppStreamContext streamContext = GetNppStreamContext(stream); - - const size_t offset = dst.getStride(TensorDimension::HEIGHT) * dst.getHeight(); - Npp8u *const dstBuffer[3] = {dst.getData(), dst.getData() + offset, dst.getData() + 2 * offset}; - status = nppiCopy_8u_C3P3R_Ctx(static_cast(src.getData()), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), dstBuffer, - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {int(src.getWidth()), int(src.getHeight())}, streamContext); - assert(status == NPP_SUCCESS); -} - -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream) -{ - InterleavedToPlanarBatch(dst, const_cast &>(src), stream); -} - -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - NppStatus status; - NppStreamContext streamContext = GetNppStreamContext(stream); - - const size_t offset = dst.getStride(TensorDimension::HEIGHT) * dst.getHeight(); - Npp16u *const dstBuffer[3] = {dst.getData(), dst.getData() + offset, dst.getData() + 2 * offset}; - status = nppiCopy_16u_C3P3R_Ctx(static_cast(src.getData()), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), dstBuffer, - 
dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {int(src.getWidth()), int(src.getHeight())}, streamContext); -} - -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream) -{ - InterleavedToPlanarBatch(dst, const_cast &>(src), stream); -} - -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - NppStatus status; - NppStreamContext streamContext = GetNppStreamContext(stream); - - const size_t offset = dst.getStride(TensorDimension::HEIGHT) * dst.getHeight(); - Npp32f *const dstBuffer[3] = {dst.getData(), dst.getData() + offset, dst.getData() + 2 * offset}; - status = nppiCopy_32f_C3P3R_Ctx(static_cast(src.getData()), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), dstBuffer, - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {int(src.getWidth()), int(src.getHeight())}, streamContext); -} - -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream) -{ - InterleavedToPlanarBatch(dst, const_cast &>(src), stream); -} - -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - Tensor tmp(dst.getWidth(), dst.getHeight(), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - dst.getData(), false); - Copy(tmp, src, stream); -} - -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream) -{ - InterleavedToPlanarBatch(dst, const_cast &>(src), stream); -} - -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - Tensor tmp(dst.getWidth(), dst.getHeight(), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - dst.getData(), false); - Copy(tmp, src, stream); -} - -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream) -{ - InterleavedToPlanarBatch(dst, const_cast &>(src), stream); -} - -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert((src.getWidth() == dst.getWidth()) && (src.getHeight() == dst.getHeight())); - - Tensor tmp(dst.getWidth(), dst.getHeight(), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - dst.getData(), false); - Copy(tmp, src, stream); -} - -void InterleavedToPlanar(Tensor &dst, const Tensor &src, cudaStream_t stream) -{ - InterleavedToPlanarBatch(dst, const_cast &>(src), stream); -} - -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/DBScan.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/DBScan.cpp deleted file mode 100644 index 0a3c2ea..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/DBScan.cpp +++ /dev/null @@ -1,214 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
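The ColorConversions.cpp hunk above removes, among other conversions, the InterleavedToPlanar overloads that repack HWC images into planar CHW buffers through the nppiCopy_*_C3P3R_Ctx calls. A plain CPU reference for that re-layout (a hypothetical helper with no NPP dependency, shown only to make the indexing explicit) is:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical CPU reference for the interleaved (HWC) -> planar (CHW) re-layout
    // performed on the GPU by the removed nppiCopy_*_C3P3R_Ctx calls.
    void InterleavedToPlanarCPU(uint8_t *dst, const uint8_t *src,
                                size_t width, size_t height, size_t channels)
    {
        const size_t plane = width * height;
        for (size_t y = 0; y < height; ++y)
            for (size_t x = 0; x < width; ++x)
                for (size_t c = 0; c < channels; ++c)
                    dst[c * plane + y * width + x] = src[(y * width + x) * channels + c];
    }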
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "cv/tensor_ops/DBScan.h" -#include "cv/tensor_ops/BBoxUtils.h" - -#include -#include - -namespace cvcore { namespace tensor_ops { - -constexpr int kUnclassified = -1; -constexpr int kCorePoint = 1; -constexpr int kBorderPoint = 2; -constexpr int kNoise = -2; - -namespace { - -float CalculateDistance(const BBox &lhs, const BBox &rhs) -{ - const float iou = GetIoU(lhs, rhs); - return 1.0f - iou; -} - -void MergeMaximumBBoxes(Array &input, Array &clusters, Array &output) -{ - BBox tempBox = {-1, -1, -1, -1}; - // Initialize each cluster-box with a placeholder that has no cluster - for (int i = 0; i < output.getSize(); i++) - { - // It's a struct so these pushes are by value - output[i] = tempBox; - } - - for (int i = 0; i < input.getSize(); i++) - { - int clusterId = clusters[i]; - if (clusterId >= 0) - { - // Box merging is associative & commutative - output[clusterId] = MergeBoxes(input[i], output[clusterId]); - } - } -} - -void MergeWeightedBBoxes(Array &input, Array &clusters, Array &weights, Array &output) -{ - int numClusters = output.getSize(); - // centos has gcc 4.8.5 which complains about initializing variable sized arrays with {}. - // Use std::vector for variable sized array. - std::vector xmins(numClusters, 0); - std::vector ymins(numClusters, 0); - std::vector xmaxs(numClusters, 0); - std::vector ymaxs(numClusters, 0); - std::vector scales(numClusters, 0); - - for (int i = 0; i < input.getSize(); i++) - { - int clusterId = clusters[i]; - if (clusterId >= 0) - { - xmins[clusterId] += input[i].xmin * weights[i]; - ymins[clusterId] += input[i].ymin * weights[i]; - xmaxs[clusterId] += input[i].xmax * weights[i]; - ymaxs[clusterId] += input[i].ymax * weights[i]; - scales[clusterId] += weights[i]; - } - } - - for (int i = 0; i < numClusters; i++) - { - output[i] = {int(xmins[i] / scales[i] + 0.5f), int(ymins[i] / scales[i] + 0.5f), - int(xmaxs[i] / scales[i] + 0.5f), int(ymaxs[i] / scales[i] + 0.5f)}; - } -} - -} // anonymous namespace - -DBScan::DBScan(int pointsSize, int minPoints, float epsilon) - : m_pointsSize(pointsSize) - , m_numClusters(0) - , m_minPoints(minPoints) - , m_epsilon(epsilon) - , m_clusterStates(pointsSize, true) -{ - m_clusterStates.setSize(pointsSize); -} - -void DBScan::doCluster(Array &input, Array &clusters) -{ - // Reset all cluster id - for (int i = 0; i < m_pointsSize; i++) - { - clusters[i] = -1; - m_clusterStates[i] = kUnclassified; - } - int nextClusterId = 0; - for (int cIndex = 0; cIndex < m_pointsSize; cIndex++) - { - std::vector neighbors; - for (int neighborIndex = 0; neighborIndex < m_pointsSize; neighborIndex++) - { - if (neighborIndex == cIndex) - { - continue; // Don't look at being your own neighbor - } - if (CalculateDistance(input[cIndex], input[neighborIndex]) <= m_epsilon) - { - // nrighborIndex is in our neighborhood - neighbors.push_back(neighborIndex); - - if (m_clusterStates[neighborIndex] == kCorePoint) - { - // We are at the neighborhood of a core point, we are at least a border point - m_clusterStates[cIndex] = kBorderPoint; - // Take the first cluster number 
as you can - if (clusters[cIndex] == -1) - { - clusters[cIndex] = clusters[neighborIndex]; - } - } - } - } - if (neighbors.size() >= m_minPoints - 1) - { - m_clusterStates[cIndex] = kCorePoint; - if (clusters[cIndex] == -1) - { - // We're not in the neighborhood of other core points - // So we're the core of a new cluster - clusters[cIndex] = nextClusterId; - nextClusterId++; - } - - // Set all neighbors that came before us to be border points - for (int neighborListIndex = 0; - neighborListIndex < neighbors.size() && neighbors[neighborListIndex] < cIndex; neighborListIndex++) - { - if (m_clusterStates[neighbors[neighborListIndex]] == kNoise) - { - // If it was noise, now it's a border point in our cluster - m_clusterStates[neighbors[neighborListIndex]] = kBorderPoint; - // Make sure everything that's in our neighborhood is our cluster id - clusters[neighbors[neighborListIndex]] = clusters[cIndex]; - } - } - } - else - { - // We are a border point, or a noise point - if (m_clusterStates[cIndex] == kUnclassified) - { - m_clusterStates[cIndex] = kNoise; - clusters[cIndex] = -1; - } - } - } - - m_numClusters = nextClusterId; // Number of clusters -} - -void DBScan::doClusterAndMerge(Array &input, Array &output, BBoxMergeType type) -{ - Array clusters(m_pointsSize, true); - clusters.setSize(m_pointsSize); - doCluster(input, clusters); - output.setSize(m_numClusters); - - // merge bboxes based on different modes (TODO: might add mininum/average in the future) - if (type == MAXIMUM) - { - MergeMaximumBBoxes(input, clusters, output); - } - else - { - throw std::runtime_error("Unsupported bbox merge type."); - } -} - -void DBScan::doClusterAndMerge(Array &input, Array &weights, Array &output, BBoxMergeType type) -{ - Array clusters(m_pointsSize, true); - clusters.setSize(m_pointsSize); - doCluster(input, clusters); - output.setSize(m_numClusters); - - // merge type must be WEIGHTED - if (type != WEIGHTED) - { - throw std::runtime_error("Bbox merge type must be WEIGHTED."); - } - MergeWeightedBBoxes(input, clusters, weights, output); -} - -int DBScan::getNumClusters() const -{ - return m_numClusters; -} - -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/Errors.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/Errors.cpp deleted file mode 100644 index d29a1ae..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/Errors.cpp +++ /dev/null @@ -1,104 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
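The DBScan.cpp hunk above removes a DBSCAN variant that clusters detection boxes, treating two boxes as neighbours when 1 - IoU is at most epsilon. A small sketch of that distance under an assumed plain Box struct (the removed code uses cvcore::BBox together with the BBoxUtils helpers) is:

    #include <algorithm>

    // Hypothetical axis-aligned box; the removed code used cvcore::BBox.
    struct Box { int xmin, ymin, xmax, ymax; };

    float Area(const Box &b)
    {
        return (b.xmax > b.xmin && b.ymax > b.ymin)
                   ? static_cast<float>((b.xmax - b.xmin) * (b.ymax - b.ymin)) : 0.f;
    }

    // Clustering distance: 1 - IoU, so identical boxes are at distance 0.
    float ClusterDistance(const Box &a, const Box &b)
    {
        const int iw = std::min(a.xmax, b.xmax) - std::max(a.xmin, b.xmin);
        const int ih = std::min(a.ymax, b.ymax) - std::max(a.ymin, b.ymin);
        const float inter = (iw > 0 && ih > 0) ? static_cast<float>(iw * ih) : 0.f;
        const float uni = Area(a) + Area(b) - inter;
        return uni > 0.f ? 1.f - inter / uni : 1.f;
    }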
-// -// SPDX-License-Identifier: Apache-2.0 - -#include "cv/tensor_ops/Errors.h" - -#ifndef __cpp_lib_to_underlying -// Using a C++23 feature by hacking std -namespace std -{ - template - constexpr underlying_type_t to_underlying(Enum e) noexcept - { - return static_cast>(e); - } -}; -#endif // __cpp_lib_to_underlying - -namespace cvcore { namespace tensor_ops { - -namespace detail -{ - struct TensorOpsErrorCategory : std::error_category - { - virtual const char * name() const noexcept override final - { - return "cvcore-tensor-ops-error"; - } - - virtual std::string message(int value) const override final - { - std::string result; - - switch(value) - { - case std::to_underlying(TensorOpsErrorCode::SUCCESS): - result = "(SUCCESS) No errors detected"; - break; - case std::to_underlying(TensorOpsErrorCode::COMPUTE_ENGINE_UNSUPPORTED_BY_CONTEXT): - result = "(COMPUTE_ENGINE_UNSUPPORTED_BY_CONTEXT) The selected compute " - "engine defined by cvcore::ComputeEngine is not avaible in the " - "requested context defined by cvcore::tensor_ops::TensorBackend"; - break; - case std::to_underlying(TensorOpsErrorCode::CAMERA_DISTORTION_MODEL_UNSUPPORTED): - result = "(CAMERA_DISTORTION_MODEL_UNSUPPORTED) The selected camera " - "distortion model defined by cvcore::CameraDistortionType is " - "currently unsupported"; - break; - default: - result = "(Unrecognized Condition) Value " + std::to_string(value) + - " does not map to known error code literal " + - " defined by cvcore::tensor_ops::TensorOpsErrorCode"; - break; - } - - return result; - } - - virtual std::error_condition default_error_condition(int code) const noexcept override final - { - std::error_condition result; - - switch(code) - { - case std::to_underlying(TensorOpsErrorCode::SUCCESS): - result = ErrorCode::SUCCESS; - break; - case std::to_underlying(TensorOpsErrorCode::COMPUTE_ENGINE_UNSUPPORTED_BY_CONTEXT): - result = ErrorCode::INVALID_ENGINE_TYPE; - break; - case std::to_underlying(TensorOpsErrorCode::CAMERA_DISTORTION_MODEL_UNSUPPORTED): - result = ErrorCode::INVALID_ARGUMENT; - break; - default: - result = ErrorCode::NOT_IMPLEMENTED; - break; - } - - return result; - } - }; -} // namespace detail - -const detail::TensorOpsErrorCategory errorCategory{}; - -std::error_code make_error_code(TensorOpsErrorCode ec) noexcept -{ - return {std::to_underlying(ec), errorCategory}; -} - -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/Filters.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/Filters.cpp deleted file mode 100644 index d8bd75c..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/Filters.cpp +++ /dev/null @@ -1,112 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
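The Errors.cpp hunk above removes the std::error_category glue that lets TensorOpsErrorCode values convert into std::error_code. The general registration pattern, shown here with hypothetical DemoErrc names rather than the cvcore enum, is roughly:

    #include <string>
    #include <system_error>

    // Hypothetical error enum; the removed file does the equivalent for TensorOpsErrorCode.
    enum class DemoErrc { Success = 0, Unsupported = 1 };

    namespace std { template <> struct is_error_code_enum<DemoErrc> : true_type {}; }

    namespace {
    struct DemoCategory : std::error_category
    {
        const char *name() const noexcept override { return "demo-error"; }
        std::string message(int v) const override
        {
            return v == 0 ? "(Success) no errors detected" : "(Unsupported) operation unsupported";
        }
    };
    const DemoCategory demoCategory{};
    }  // namespace

    // With the enum registered above, std::error_code ec = DemoErrc::Unsupported; now compiles.
    std::error_code make_error_code(DemoErrc ec) noexcept
    {
        return {static_cast<int>(ec), demoCategory};
    }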
-// -// SPDX-License-Identifier: Apache-2.0 - -#include "Filters.h" -#include "NppUtils.h" - -#include "cv/core/MathTypes.h" -#include "cv/core/Memory.h" - -#include - -#include -#include -#include - -namespace cvcore { namespace tensor_ops { - -void BoxFilter(Tensor &dst, const Tensor &src, const Vector2i &maskSize, - const Vector2i &anchor, cudaStream_t stream) -{ - assert(!src.isCPU() && !dst.isCPU()); - NppStatus status = nppiFilterBoxBorder_8u_C3R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {static_cast(src.getWidth()), static_cast(src.getHeight())}, {0, 0}, - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {static_cast(src.getWidth()), static_cast(src.getHeight())}, {maskSize.x, maskSize.y}, - {anchor.x, anchor.y}, - NPP_BORDER_REPLICATE, //Only Npp Replicate is supported!!! - GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -void BoxFilter(Tensor &dst, const Tensor &src, const Vector2i &maskSize, - const Vector2i &anchor, cudaStream_t stream) -{ - assert(!src.isCPU() && !dst.isCPU()); - NppStatus status = nppiFilterBoxBorder_8u_C1R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {static_cast(src.getWidth()), static_cast(src.getHeight())}, {0, 0}, - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {static_cast(src.getWidth()), static_cast(src.getHeight())}, {maskSize.x, maskSize.y}, - {anchor.x, anchor.y}, NPP_BORDER_REPLICATE, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -void BoxFilter(Tensor &dst, const Tensor &src, const Vector2i &maskSize, - const Vector2i &anchor, cudaStream_t stream) -{ - assert(!src.isCPU() && !dst.isCPU()); - NppStatus status = nppiFilterBoxBorder_16u_C1R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {static_cast(src.getWidth()), static_cast(src.getHeight())}, {0, 0}, - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {static_cast(src.getWidth()), static_cast(src.getHeight())}, {maskSize.x, maskSize.y}, - {anchor.x, anchor.y}, NPP_BORDER_REPLICATE, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -void BoxFilter(Tensor &dst, const Tensor &src, const Vector2i &maskSize, - const Vector2i &anchor, cudaStream_t stream) -{ - assert(!src.isCPU() && !dst.isCPU()); - NppStatus status = nppiFilterBoxBorder_16u_C3R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {static_cast(src.getWidth()), static_cast(src.getHeight())}, {0, 0}, - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {static_cast(src.getWidth()), static_cast(src.getHeight())}, {maskSize.x, maskSize.y}, - {anchor.x, anchor.y}, NPP_BORDER_REPLICATE, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -void BoxFilter(Tensor &dst, const Tensor &src, const Vector2i &maskSize, - const Vector2i &anchor, cudaStream_t stream) -{ - assert(!src.isCPU() && !dst.isCPU()); - NppStatus status = nppiFilterBoxBorder_32f_C3R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {static_cast(src.getWidth()), static_cast(src.getHeight())}, {0, 0}, - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {static_cast(src.getWidth()), static_cast(src.getHeight())}, {maskSize.x, maskSize.y}, - {anchor.x, anchor.y}, NPP_BORDER_REPLICATE, 
GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -void BoxFilter(Tensor &dst, const Tensor &src, const Vector2i &maskSize, - const Vector2i &anchor, cudaStream_t stream) -{ - assert(!src.isCPU() && !dst.isCPU()); - NppStatus status = nppiFilterBoxBorder_32f_C1R_Ctx( - static_cast(src.getData()), src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {static_cast(src.getWidth()), static_cast(src.getHeight())}, {0, 0}, - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {static_cast(src.getWidth()), static_cast(src.getHeight())}, {maskSize.x, maskSize.y}, - {anchor.x, anchor.y}, NPP_BORDER_REPLICATE, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/Filters.h b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/Filters.h deleted file mode 100644 index f764b0b..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/Filters.h +++ /dev/null @@ -1,105 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_FILTERS_H -#define CVCORE_FILTERS_H - -#include "cv/core/Tensor.h" -#include "cv/core/MathTypes.h" - -#include - -namespace cvcore { namespace tensor_ops { - -/** - * Box type filtering for three channel HWC format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param maskSize Size of mask which determines number of pixels to be averaged. - * @param anchor Offset of mask relative to current pixel index. - * {0, 0} mask aligns with starting pixel. - * {mask size/2, mask size/2} mask aligns with center pixel index. - * @param stream specified cuda stream. - */ - -void BoxFilter(Tensor &dst, const Tensor &src, const Vector2i &maskSize, - const Vector2i &anchor, cudaStream_t stream = 0); -/** - * Box type filtering for three channel HWC format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param maskSize Size of mask which determines number of pixels to be averaged. - * @param anchor Offset of mask relative to current pixel index. - * {0, 0} mask aligns with starting pixel. - * {mask size/2, mask size/2} mask aligns with center pixel index. - * @param stream specified cuda stream. - */ -void BoxFilter(Tensor &dst, const Tensor &src, const Vector2i &maskSize, - const Vector2i &anchor, cudaStream_t stream = 0); -/** - * Box type filtering for three channel HWC format float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param maskSize Size of mask which determines number of pixels to be averaged. - * @param anchor Offset of mask relative to current pixel index. - * {0, 0} mask aligns with starting pixel. - * {mask size/2, mask size/2} mask aligns with center pixel index. 
- * @param stream specified cuda stream. - */ -void BoxFilter(Tensor &dst, const Tensor &src, const Vector2i &maskSize, - const Vector2i &anchor, cudaStream_t stream = 0); - -/** - * Box type filtering for one channel HWC format uint_8 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param maskSize Size of mask which determines number of pixels to be averaged. - * @param anchor Offset of mask relative to current pixel index. - * {0, 0} mask aligns with starting pixel. - * {mask size/2, mask size/2} mask aligns with center pixel index. - * @param stream specified cuda stream. - */ -void BoxFilter(Tensor &dst, const Tensor &src, const Vector2i &maskSize, - const Vector2i &anchor, cudaStream_t stream = 0); -/** - * Box type filtering for one channel HWC format uint_16 type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param maskSize Size of mask which determines number of pixels to be averaged. - * @param anchor Offset of mask relative to current pixel index. - * {0, 0} mask aligns with starting pixel. - * {mask size/2, mask size/2} mask aligns with center pixel index. - * @param stream specified cuda stream. - */ -void BoxFilter(Tensor &dst, const Tensor &src, const Vector2i &maskSize, - const Vector2i &anchor, cudaStream_t stream = 0); -/** - * Box type filtering for one channel HWC format float type Tensor. - * @param dst destination tensor. - * @param src source tensor. - * @param maskSize Size of mask which determines number of pixels to be averaged. - * @param anchor Offset of mask relative to current pixel index. - * {0, 0} mask aligns with starting pixel. - * {mask size/2, mask size/2} mask aligns with center pixel index. - * @param stream specified cuda stream. - */ -void BoxFilter(Tensor &dst, const Tensor &src, const Vector2i &maskSize, - const Vector2i &anchor, cudaStream_t stream = 0); - -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_FILTERS_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/FusedOperations.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/FusedOperations.cpp deleted file mode 100644 index 4ae0c3b..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/FusedOperations.cpp +++ /dev/null @@ -1,261 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -#include "NppUtils.h" - -#include "cv/tensor_ops/ImageUtils.h" - -#include "cv/core/Memory.h" - -#include -#include -#include - -namespace cvcore { namespace tensor_ops { - -template -struct ImageToNormalizedPlanarTensorOperator::ImageToNormalizedPlanarTensorOperatorImpl -{ - int m_width; - int m_height; - int m_depth; - std::unique_ptr> m_resizedTensor; - std::unique_ptr> m_normalizedTensor; - - template::type * = nullptr> - ImageToNormalizedPlanarTensorOperatorImpl(int width, int height) - : m_width(width) - , m_height(height) - , m_depth(1) - { - m_resizedTensor.reset(new Tensor(width, height, false)); - m_normalizedTensor.reset(new Tensor(width, height, false)); - } - - template::type * = nullptr> - ImageToNormalizedPlanarTensorOperatorImpl(int width, int height, int depth) - : m_width(width) - , m_height(height) - , m_depth(depth) - { - m_resizedTensor.reset(new Tensor(width, height, depth, false)); - m_normalizedTensor.reset(new Tensor(width, height, depth, false)); - } - - template::type * = nullptr> - void execute(Tensor &dst, const Tensor &src, const float scale[3], - const float offset[3], bool swapRB, bool keep_aspect_ratio, cudaStream_t stream) - { - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - - // dst image width/height must match width/height of class - if ((dst.getWidth() != m_width) || (dst.getHeight() != m_height)) - { - throw std::runtime_error("invalid input width/height"); - } - - // first do the resizing - Resize(*m_resizedTensor, src, keep_aspect_ratio, INTERP_LINEAR, stream); - - // swap channels if needed - if (swapRB) - { - ConvertColorFormat(*m_resizedTensor, *m_resizedTensor, BGR2RGB, stream); - } - - // do the normalization - Normalize(*m_normalizedTensor, *m_resizedTensor, scale, offset, stream); - - // convert interleave to planar tensor - InterleavedToPlanar(dst, *m_normalizedTensor, stream); - } - - template::type * = nullptr> - void execute(Tensor &dst, const Tensor &src, const float scale[3], - const float offset[3], bool swapRB, bool keep_aspect_ratio, cudaStream_t stream) - { - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - - // dst image width/height must match width/height of class - if ((dst.getWidth() != m_width) || (dst.getHeight() != m_height)) - { - throw std::runtime_error("invalid input width/height"); - } - - // dst image depth must be equal to src image depth and no bigger than m_depth - if ((dst.getDepth() != src.getDepth()) || (dst.getDepth() > m_depth)) - { - throw std::runtime_error("invalid input depth"); - } - - // wrap the batch tensor with non-owning tensor - Tensor resizedTensor(m_width, m_height, dst.getDepth(), m_resizedTensor->getData(), false); - Tensor normalizedTensor(m_width, m_height, dst.getDepth(), m_normalizedTensor->getData(), - false); - - // first do the resizing - Resize(resizedTensor, src, keep_aspect_ratio, INTERP_LINEAR, stream); - - // swap channels if needed - if (swapRB) - { - ConvertColorFormat(resizedTensor, resizedTensor, BGR2RGB, stream); - } - - // do the normalization - Normalize(normalizedTensor, resizedTensor, scale, offset, stream); - - // convert interleave to planar tensor - InterleavedToPlanar(dst, normalizedTensor, stream); - } - - template::type * = nullptr> - void execute(Tensor &dst, const Tensor &src, float scale, float offset, - bool keep_aspect_ratio, cudaStream_t stream) - { - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - - // dst image width/height must 
match width/height of class - if ((dst.getWidth() != m_width) || (dst.getHeight() != m_height)) - { - throw std::runtime_error("invalid input width/height"); - } - - // first do the resizing - Resize(*m_resizedTensor, src, keep_aspect_ratio, INTERP_LINEAR, stream); - - // do the normalization and map to destination tensor directly - Tensor output(m_width, m_height, dst.getData(), false); - Normalize(output, *m_resizedTensor, scale, offset, stream); - } - - template::type * = nullptr> - void execute(Tensor &dst, const Tensor &src, float scale, float offset, - bool keep_aspect_ratio, cudaStream_t stream) - { - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - - // dst image width/height must match width/height of class - if ((dst.getWidth() != m_width) || (dst.getHeight() != m_height)) - { - throw std::runtime_error("invalid input width/height"); - } - - // dst image depth must be equal to src image depth and no bigger than m_depth - if ((dst.getDepth() != src.getDepth()) || (dst.getDepth() > m_depth)) - { - throw std::runtime_error("invalid input depth"); - } - - // wrap the batch tensor with non-owning tensor - Tensor resizedTensor(m_width, m_height, dst.getDepth(), m_resizedTensor->getData(), false); - - // first do the resizing - Resize(resizedTensor, src, keep_aspect_ratio, INTERP_LINEAR, stream); - - // do the normalization and map to destination tensor directly - Tensor output(m_width, m_height, dst.getDepth(), dst.getData(), false); - Normalize(output, resizedTensor, scale, offset, stream); - } -}; - -template -template::type *> -ImageToNormalizedPlanarTensorOperator::ImageToNormalizedPlanarTensorOperator(int width, - int height) - : m_pImpl(new ImageToNormalizedPlanarTensorOperatorImpl(width, height)) -{ - static_assert(TL_IN == HWC && TL_OUT == CHW, "Tensor Layout is different"); - static_assert(CC == C1 || CC == C3, "Channel count is different"); -} - -template -template::type *> -ImageToNormalizedPlanarTensorOperator::ImageToNormalizedPlanarTensorOperator(int width, - int height, - int depth) - : m_pImpl(new ImageToNormalizedPlanarTensorOperatorImpl(width, height, depth)) -{ - static_assert(TL_IN == NHWC && TL_OUT == NCHW, "Tensor Layout is different"); - static_assert(CC == C1 || CC == C3, "Channel count is different"); -} - -template -ImageToNormalizedPlanarTensorOperator::~ImageToNormalizedPlanarTensorOperator() -{ -} - -template -template::type *> -void ImageToNormalizedPlanarTensorOperator::operator()( - Tensor &dst, const Tensor &src, const float scale[3], const float offset[3], - bool swapRB, bool keep_aspect_ratio, cudaStream_t stream) -{ - m_pImpl->execute(dst, src, scale, offset, swapRB, keep_aspect_ratio, stream); -} - -template -template::type *> -void ImageToNormalizedPlanarTensorOperator::operator()(Tensor &dst, - const Tensor &src, - float scale, float offset, - bool keep_aspect_ratio, - cudaStream_t stream) -{ - m_pImpl->execute(dst, src, scale, offset, keep_aspect_ratio, stream); -} - -// explicit instantiations -template class ImageToNormalizedPlanarTensorOperator; -template void ImageToNormalizedPlanarTensorOperator::operator()(Tensor &, - const Tensor &, - const float [], const float [], - bool, bool, cudaStream_t); -template ImageToNormalizedPlanarTensorOperator::ImageToNormalizedPlanarTensorOperator(int, int); - -template class ImageToNormalizedPlanarTensorOperator; -template void ImageToNormalizedPlanarTensorOperator::operator()(Tensor &, - const Tensor &, - float, float, bool, cudaStream_t); -template 
ImageToNormalizedPlanarTensorOperator::ImageToNormalizedPlanarTensorOperator(int, int); - -template class ImageToNormalizedPlanarTensorOperator; -template void ImageToNormalizedPlanarTensorOperator::operator()(Tensor &, - const Tensor &, - const float [], const float [], - bool, bool, cudaStream_t); -template ImageToNormalizedPlanarTensorOperator::ImageToNormalizedPlanarTensorOperator(int, - int, - int); - -template class ImageToNormalizedPlanarTensorOperator; -template void ImageToNormalizedPlanarTensorOperator::operator()(Tensor &, - const Tensor &, - float, float, bool, - cudaStream_t); -template ImageToNormalizedPlanarTensorOperator::ImageToNormalizedPlanarTensorOperator(int, - int, - int); -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/GeometryTransforms.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/GeometryTransforms.cpp deleted file mode 100644 index 238eac1..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/GeometryTransforms.cpp +++ /dev/null @@ -1,754 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "NppUtils.h" - -#include "cv/tensor_ops/ImageUtils.h" - -#include "cv/core/Memory.h" - -#include -#include - -#include -#include -#include - -namespace cvcore { namespace tensor_ops { - -namespace { - -static NppiInterpolationMode GetNppiInterpolationMode(InterpolationType type) -{ - if (type == INTERP_NEAREST) - { - return NPPI_INTER_NN; - } - else if (type == INTERP_LINEAR) - { - return NPPI_INTER_LINEAR; - } - else if (type == INTERP_CUBIC_BSPLINE) - { - return NPPI_INTER_CUBIC2P_BSPLINE; - } - else if (type == INTERP_CUBIC_CATMULLROM) - { - return NPPI_INTER_CUBIC2P_CATMULLROM; - } - else - { - throw std::runtime_error("invalid resizing interpolation mode"); - } -} - -static BBox GetScaledROI(int srcW, int srcH, int dstW, int dstH) -{ - if (srcW * dstH >= dstW * srcH) - { - int bboxH = static_cast((static_cast(srcH) / srcW) * dstW); - int offsetH = (dstH - bboxH) / 2; - return {0, offsetH, dstW, offsetH + bboxH}; - } - else - { - int bboxW = static_cast((static_cast(srcW) / srcH) * dstH); - int offsetW = (dstW - bboxW) / 2; - return {offsetW, 0, offsetW + bboxW, dstH}; - } -} - -static void AssertValidROI(const BBox &roi, int width, int height) -{ - assert(roi.xmin >= 0 && roi.xmin < roi.xmax); - assert(roi.ymin >= 0 && roi.ymin < roi.ymax); - assert(roi.ymax <= height); - assert(roi.xmax <= width); -} - -template -void FillBufferC1U8Impl(Tensor &dst, const Npp8u value, cudaStream_t stream) -{ - assert(!dst.isCPU()); - NppStatus status = nppiSet_8u_C1R_Ctx(value, static_cast(dst.getData()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {int(dst.getWidth()), int(dst.getHeight())}, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -template -void FillBufferC1U16Impl(Tensor &dst, const 
Npp16u value, cudaStream_t stream) -{ - assert(!dst.isCPU()); - NppStatus status = nppiSet_16u_C1R_Ctx(value, static_cast(dst.getData()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {int(dst.getWidth()), int(dst.getHeight())}, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -template -void FillBufferC1F32Impl(Tensor &dst, const Npp32f value, cudaStream_t stream) -{ - assert(!dst.isCPU()); - NppStatus status = nppiSet_32f_C1R_Ctx(value, static_cast(dst.getData()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {int(dst.getWidth()), int(dst.getHeight())}, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -static void FillBuffer(Tensor &dst, const Npp8u value, cudaStream_t stream = 0) -{ - FillBufferC1U8Impl(dst, value, stream); -} - -static void FillBuffer(Tensor &dst, const Npp16u value, cudaStream_t stream = 0) -{ - FillBufferC1U16Impl(dst, value, stream); -} - -static void FillBuffer(Tensor &dst, const Npp32f value, cudaStream_t stream = 0) -{ - FillBufferC1F32Impl(dst, value, stream); -} - -static void FillBuffer(Tensor &dst, const Npp8u value, cudaStream_t stream = 0) -{ - FillBufferC1U8Impl(dst, value, stream); -} - -static void FillBuffer(Tensor &dst, const Npp16u value, cudaStream_t stream = 0) -{ - FillBufferC1U16Impl(dst, value, stream); -} - -static void FillBuffer(Tensor &dst, const Npp32f value, cudaStream_t stream = 0) -{ - FillBufferC1F32Impl(dst, value, stream); -} - -static void FillBuffer(Tensor &dst, const Npp8u value, cudaStream_t stream = 0) -{ - assert(!dst.isCPU()); - const Npp8u padding[3] = {value, value, value}; - NppStatus status = nppiSet_8u_C3R_Ctx(padding, static_cast(dst.getData()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {int(dst.getWidth()), int(dst.getHeight())}, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -static void FillBuffer(Tensor &dst, const Npp16u value, cudaStream_t stream = 0) -{ - assert(!dst.isCPU()); - const Npp16u padding[3] = {value, value, value}; - NppStatus status = nppiSet_16u_C3R_Ctx(padding, static_cast(dst.getData()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {int(dst.getWidth()), int(dst.getHeight())}, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -static void FillBuffer(Tensor &dst, const Npp32f value, cudaStream_t stream = 0) -{ - assert(!dst.isCPU()); - const Npp32f padding[3] = {value, value, value}; - NppStatus status = nppiSet_32f_C3R_Ctx(padding, static_cast(dst.getData()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {int(dst.getWidth()), int(dst.getHeight())}, GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -template -void CropAndResizeC1U8Impl(Tensor &dst, const Tensor &src, const BBox &dstROI, - const BBox &srcROI, InterpolationType type, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - AssertValidROI(dstROI, dst.getWidth(), dst.getHeight()); - AssertValidROI(srcROI, src.getWidth(), src.getHeight()); - - NppStatus status = nppiResizeSqrPixel_8u_C1R_Ctx( - static_cast(src.getData() + srcROI.ymin * src.getStride(TensorDimension::HEIGHT) + srcROI.xmin), - {srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {0, 0, srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, - static_cast(dst.getData() + dstROI.ymin * dst.getStride(TensorDimension::HEIGHT) + dstROI.xmin), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {0, 
0, dstROI.xmax - dstROI.xmin, dstROI.ymax - dstROI.ymin}, - double(dstROI.xmax - dstROI.xmin) / double(srcROI.xmax - srcROI.xmin), - double(dstROI.ymax - dstROI.ymin) / double(srcROI.ymax - srcROI.ymin), 0.0, 0.0, GetNppiInterpolationMode(type), - GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -template -void CropAndResizeC1U16Impl(Tensor &dst, const Tensor &src, const BBox &dstROI, - const BBox &srcROI, InterpolationType type, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - AssertValidROI(dstROI, dst.getWidth(), dst.getHeight()); - AssertValidROI(srcROI, src.getWidth(), src.getHeight()); - - NppStatus status = nppiResizeSqrPixel_16u_C1R_Ctx( - static_cast(src.getData() + srcROI.ymin * src.getStride(TensorDimension::HEIGHT) + srcROI.xmin), - {srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {0, 0, srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, - static_cast(dst.getData() + dstROI.ymin * dst.getStride(TensorDimension::HEIGHT) + dstROI.xmin), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {0, 0, dstROI.xmax - dstROI.xmin, dstROI.ymax - dstROI.ymin}, - double(dstROI.xmax - dstROI.xmin) / double(srcROI.xmax - srcROI.xmin), - double(dstROI.ymax - dstROI.ymin) / double(srcROI.ymax - srcROI.ymin), 0.0, 0.0, GetNppiInterpolationMode(type), - GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -template -void CropAndResizeC1F32Impl(Tensor &dst, const Tensor &src, const BBox &dstROI, - const BBox &srcROI, InterpolationType type, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - AssertValidROI(dstROI, dst.getWidth(), dst.getHeight()); - AssertValidROI(srcROI, src.getWidth(), src.getHeight()); - - NppStatus status = nppiResizeSqrPixel_32f_C1R_Ctx( - static_cast(src.getData() + srcROI.ymin * src.getStride(TensorDimension::HEIGHT) + srcROI.xmin), - {srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {0, 0, srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, - static_cast(dst.getData() + dstROI.ymin * dst.getStride(TensorDimension::HEIGHT) + dstROI.xmin), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {0, 0, dstROI.xmax - dstROI.xmin, dstROI.ymax - dstROI.ymin}, - double(dstROI.xmax - dstROI.xmin) / double(srcROI.xmax - srcROI.xmin), - double(dstROI.ymax - dstROI.ymin) / double(srcROI.ymax - srcROI.ymin), 0.0, 0.0, GetNppiInterpolationMode(type), - GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -template -void ResizeImpl(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, InterpolationType type, - cudaStream_t stream) -{ - const BBox dstROI = keep_aspect_ratio - ? 
GetScaledROI(src.getWidth(), src.getHeight(), dst.getWidth(), dst.getHeight()) - : BBox{0, 0, int(dst.getWidth()), int(dst.getHeight())}; - if (keep_aspect_ratio) - { - FillBuffer(dst, 0, stream); - } - CropAndResize(dst, src, dstROI, {0, 0, int(src.getWidth()), int(src.getHeight())}, type, stream); -} - -template -void CropC1U8Impl(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - AssertValidROI(srcROI, src.getWidth(), src.getHeight()); - assert(srcROI.xmax - srcROI.xmin == dst.getWidth() && srcROI.ymax - srcROI.ymin == dst.getHeight()); - - NppStatus status = nppiCopy_8u_C1R_Ctx( - static_cast(src.getData() + srcROI.ymin * src.getStride(TensorDimension::HEIGHT) + srcROI.xmin), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), static_cast(dst.getData()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), {srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, - GetNppStreamContext(stream)); - - assert(status == NPP_SUCCESS); -} - -template -void CropC1U16Impl(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - AssertValidROI(srcROI, src.getWidth(), src.getHeight()); - assert(srcROI.xmax - srcROI.xmin == dst.getWidth() && srcROI.ymax - srcROI.ymin == dst.getHeight()); - - NppStatus status = nppiCopy_16u_C1R_Ctx( - static_cast(src.getData() + srcROI.ymin * src.getStride(TensorDimension::HEIGHT) + srcROI.xmin), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), static_cast(dst.getData()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), {srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, - GetNppStreamContext(stream)); - - assert(status == NPP_SUCCESS); -} - -template -void CropC1F32Impl(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - AssertValidROI(srcROI, src.getWidth(), src.getHeight()); - assert(srcROI.xmax - srcROI.xmin == dst.getWidth() && srcROI.ymax - srcROI.ymin == dst.getHeight()); - - NppStatus status = nppiCopy_32f_C1R_Ctx( - static_cast(src.getData() + srcROI.ymin * src.getStride(TensorDimension::HEIGHT) + srcROI.xmin), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), static_cast(dst.getData()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), {srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, - GetNppStreamContext(stream)); - - assert(status == NPP_SUCCESS); -} - -template -void ResizeBatch(Tensor &dst, Tensor &src, bool keep_aspect_ratio, InterpolationType type, - cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - assert(src.getDepth() == dst.getDepth()); - - for (int i = 0; i < src.getDepth(); i++) - { - size_t offsetSrc = i * src.getStride(TensorDimension::DEPTH); - size_t offsetDst = i * dst.getStride(TensorDimension::DEPTH); - Tensor srcTmp(src.getWidth(), src.getHeight(), - src.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - src.getData() + offsetSrc, false); - Tensor dstTmp(dst.getWidth(), dst.getHeight(), - dst.getStride(TensorDimension::HEIGHT) * GetChannelSize(CT), - dst.getData() + offsetDst, false); - Resize(dstTmp, srcTmp, keep_aspect_ratio, type, stream); - } -} - -} // anonymous namespace - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, InterpolationType type, - cudaStream_t stream) -{ - ResizeImpl(dst, src, 
keep_aspect_ratio, type, stream); -} - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, InterpolationType type, - cudaStream_t stream) -{ - ResizeBatch(dst, const_cast &>(src), keep_aspect_ratio, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - CropAndResizeC1U8Impl(dst, src, dstROI, srcROI, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, InterpolationType type, - cudaStream_t stream) -{ - CropAndResize(dst, src, {0, 0, int(dst.getWidth()), int(dst.getHeight())}, srcROI, type, stream); -} - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, InterpolationType type, - cudaStream_t stream) -{ - ResizeImpl(dst, src, keep_aspect_ratio, type, stream); -} - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, - InterpolationType type, cudaStream_t stream) -{ - ResizeBatch(dst, const_cast &>(src), keep_aspect_ratio, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - CropAndResizeC1U16Impl(dst, src, dstROI, srcROI, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - CropAndResize(dst, src, {0, 0, int(dst.getWidth()), int(dst.getHeight())}, srcROI, type, stream); -} - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, InterpolationType type, - cudaStream_t stream) -{ - ResizeImpl(dst, src, keep_aspect_ratio, type, stream); -} - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, - InterpolationType type, cudaStream_t stream) -{ - ResizeBatch(dst, const_cast &>(src), keep_aspect_ratio, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - CropAndResizeC1F32Impl(dst, src, dstROI, srcROI, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - CropAndResize(dst, src, {0, 0, int(dst.getWidth()), int(dst.getHeight())}, srcROI, type, stream); -} - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, InterpolationType type, - cudaStream_t stream) -{ - ResizeImpl(dst, src, keep_aspect_ratio, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - CropAndResizeC1U8Impl(dst, src, dstROI, srcROI, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, InterpolationType type, - cudaStream_t stream) -{ - CropAndResize(dst, src, {0, 0, int(dst.getWidth()), int(dst.getHeight())}, srcROI, type, stream); -} - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, InterpolationType type, - cudaStream_t stream) -{ - ResizeImpl(dst, src, keep_aspect_ratio, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - CropAndResizeC1U16Impl(dst, src, dstROI, srcROI, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - CropAndResize(dst, src, {0, 0, int(dst.getWidth()), int(dst.getHeight())}, srcROI, type, stream); -} - 
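// --------------------------------------------------------------------------
// [Editor's note - illustrative sketch, not part of the original file removed
// by this patch.] The keep_aspect_ratio path of the Resize() overloads in this
// file relies on GetScaledROI() (defined earlier above) to letterbox the
// output: the scaled image is centered in the destination and the remaining
// border is cleared with FillBuffer() before CropAndResize() writes into the
// centered ROI. A minimal standalone reconstruction of that ROI computation
// is sketched below; the names BoxI and LetterboxROI are hypothetical, and the
// {xmin, ymin, xmax, ymax} layout is assumed to match the BBox type used
// throughout this file.
//
//   struct BoxI { int xmin, ymin, xmax, ymax; };
//
//   BoxI LetterboxROI(int srcW, int srcH, int dstW, int dstH)
//   {
//       if (srcW * dstH >= dstW * srcH)  // source is wider: pad top/bottom
//       {
//           int boxH    = static_cast<int>((static_cast<float>(srcH) / srcW) * dstW);
//           int offsetH = (dstH - boxH) / 2;
//           return {0, offsetH, dstW, offsetH + boxH};
//       }
//       // source is taller: pad left/right
//       int boxW    = static_cast<int>((static_cast<float>(srcW) / srcH) * dstH);
//       int offsetW = (dstW - boxW) / 2;
//       return {offsetW, 0, offsetW + boxW, dstH};
//   }
//
//   // Example: LetterboxROI(1920, 1080, 640, 640) yields {0, 140, 640, 500},
//   // i.e. a 640x360 scaled image centered vertically in a 640x640 buffer.
// --------------------------------------------------------------------------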
-void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, InterpolationType type, - cudaStream_t stream) -{ - ResizeImpl(dst, src, keep_aspect_ratio, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - CropAndResizeC1F32Impl(dst, src, dstROI, srcROI, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - CropAndResize(dst, src, {0, 0, int(dst.getWidth()), int(dst.getHeight())}, srcROI, type, stream); -} - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, InterpolationType type, - cudaStream_t stream) -{ - ResizeImpl(dst, src, keep_aspect_ratio, type, stream); -} - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, InterpolationType type, - cudaStream_t stream) -{ - ResizeBatch(dst, const_cast &>(src), keep_aspect_ratio, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - AssertValidROI(dstROI, dst.getWidth(), dst.getHeight()); - AssertValidROI(srcROI, src.getWidth(), src.getHeight()); - - NppStatus status = nppiResizeSqrPixel_8u_C3R_Ctx( - static_cast(src.getData() + srcROI.ymin * src.getStride(TensorDimension::HEIGHT) + - srcROI.xmin * src.getChannelCount()), - {srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {0, 0, srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, - static_cast(dst.getData() + dstROI.ymin * dst.getStride(TensorDimension::HEIGHT) + - dstROI.xmin * dst.getChannelCount()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {0, 0, dstROI.xmax - dstROI.xmin, dstROI.ymax - dstROI.ymin}, - double(dstROI.xmax - dstROI.xmin) / double(srcROI.xmax - srcROI.xmin), - double(dstROI.ymax - dstROI.ymin) / double(srcROI.ymax - srcROI.ymin), 0.0, 0.0, GetNppiInterpolationMode(type), - GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, InterpolationType type, - cudaStream_t stream) -{ - CropAndResize(dst, src, {0, 0, int(dst.getWidth()), int(dst.getHeight())}, srcROI, type, stream); -} - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, InterpolationType type, - cudaStream_t stream) -{ - ResizeImpl(dst, src, keep_aspect_ratio, type, stream); -} - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, - InterpolationType type, cudaStream_t stream) -{ - ResizeBatch(dst, const_cast &>(src), keep_aspect_ratio, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - AssertValidROI(dstROI, dst.getWidth(), dst.getHeight()); - AssertValidROI(srcROI, src.getWidth(), src.getHeight()); - - NppStatus status = nppiResizeSqrPixel_16u_C3R_Ctx( - static_cast(src.getData() + srcROI.ymin * src.getStride(TensorDimension::HEIGHT) + - srcROI.xmin * src.getChannelCount()), - {srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {0, 0, srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, - static_cast(dst.getData() + dstROI.ymin * 
dst.getStride(TensorDimension::HEIGHT) + - dstROI.xmin * dst.getChannelCount()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {0, 0, dstROI.xmax - dstROI.xmin, dstROI.ymax - dstROI.ymin}, - double(dstROI.xmax - dstROI.xmin) / double(srcROI.xmax - srcROI.xmin), - double(dstROI.ymax - dstROI.ymin) / double(srcROI.ymax - srcROI.ymin), 0.0, 0.0, GetNppiInterpolationMode(type), - GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - CropAndResize(dst, src, {0, 0, int(dst.getWidth()), int(dst.getHeight())}, srcROI, type, stream); -} - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, InterpolationType type, - cudaStream_t stream) -{ - ResizeImpl(dst, src, keep_aspect_ratio, type, stream); -} - -void Resize(Tensor &dst, const Tensor &src, bool keep_aspect_ratio, - InterpolationType type, cudaStream_t stream) -{ - ResizeBatch(dst, const_cast &>(src), keep_aspect_ratio, type, stream); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &dstROI, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - AssertValidROI(dstROI, dst.getWidth(), dst.getHeight()); - AssertValidROI(srcROI, src.getWidth(), src.getHeight()); - - NppStatus status = nppiResizeSqrPixel_32f_C3R_Ctx( - static_cast(src.getData() + srcROI.ymin * src.getStride(TensorDimension::HEIGHT) + - srcROI.xmin * src.getChannelCount()), - {srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {0, 0, srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, - static_cast(dst.getData() + dstROI.ymin * dst.getStride(TensorDimension::HEIGHT) + - dstROI.xmin * dst.getChannelCount()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {0, 0, dstROI.xmax - dstROI.xmin, dstROI.ymax - dstROI.ymin}, - double(dstROI.xmax - dstROI.xmin) / double(srcROI.xmax - srcROI.xmin), - double(dstROI.ymax - dstROI.ymin) / double(srcROI.ymax - srcROI.ymin), 0.0, 0.0, GetNppiInterpolationMode(type), - GetNppStreamContext(stream)); - assert(status == NPP_SUCCESS); -} - -void CropAndResize(Tensor &dst, const Tensor &src, const BBox &srcROI, - InterpolationType type, cudaStream_t stream) -{ - CropAndResize(dst, src, {0, 0, int(dst.getWidth()), int(dst.getHeight())}, srcROI, type, stream); -} - -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream) -{ - CropC1U8Impl(dst, src, srcROI, stream); -} - -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream) -{ - CropC1U16Impl(dst, src, srcROI, stream); -} - -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream) -{ - CropC1F32Impl(dst, src, srcROI, stream); -} - -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream) -{ - CropC1U8Impl(dst, src, srcROI, stream); -} - -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream) -{ - CropC1U16Impl(dst, src, srcROI, stream); -} - -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream) -{ - CropC1F32Impl(dst, src, srcROI, stream); -} - -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - AssertValidROI(srcROI, src.getWidth(), src.getHeight()); - assert(srcROI.xmax - 
srcROI.xmin == dst.getWidth() && srcROI.ymax - srcROI.ymin == dst.getHeight()); - - NppStatus status = nppiCopy_8u_C3R_Ctx( - static_cast(src.getData() + srcROI.ymin * src.getStride(TensorDimension::HEIGHT) + - srcROI.xmin * src.getChannelCount()), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), static_cast(dst.getData()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), {srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, - GetNppStreamContext(stream)); - - assert(status == NPP_SUCCESS); -} - -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - AssertValidROI(srcROI, src.getWidth(), src.getHeight()); - assert(srcROI.xmax - srcROI.xmin == dst.getWidth() && srcROI.ymax - srcROI.ymin == dst.getHeight()); - - NppStatus status = nppiCopy_16u_C3R_Ctx( - static_cast(src.getData() + srcROI.ymin * src.getStride(TensorDimension::HEIGHT) + - srcROI.xmin * src.getChannelCount()), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), static_cast(dst.getData()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), {srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, - GetNppStreamContext(stream)); - - assert(status == NPP_SUCCESS); -} - -void Crop(Tensor &dst, const Tensor &src, const BBox &srcROI, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - AssertValidROI(srcROI, src.getWidth(), src.getHeight()); - assert(srcROI.xmax - srcROI.xmin == dst.getWidth() && srcROI.ymax - srcROI.ymin == dst.getHeight()); - - NppStatus status = nppiCopy_32f_C3R_Ctx( - static_cast(src.getData() + srcROI.ymin * src.getStride(TensorDimension::HEIGHT) + - srcROI.xmin * src.getChannelCount()), - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), static_cast(dst.getData()), - dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), {srcROI.xmax - srcROI.xmin, srcROI.ymax - srcROI.ymin}, - GetNppStreamContext(stream)); - - assert(status == NPP_SUCCESS); -} - -void WarpPerspective(Tensor &dst, const Tensor &src, const double coeffs[3][3], - InterpolationType type, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - - NppStatus status = nppiWarpPerspective_8u_C1R_Ctx( - static_cast(src.getData()), {int(src.getWidth()), int(src.getHeight())}, - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), {0, 0, int(src.getWidth()), int(src.getHeight())}, - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {0, 0, int(dst.getWidth()), int(dst.getHeight())}, coeffs, GetNppiInterpolationMode(type), - GetNppStreamContext(stream)); - - assert(status == NPP_SUCCESS); -} - -void WarpPerspective(Tensor &dst, const Tensor &src, const double coeffs[3][3], - InterpolationType type, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - - NppStatus status = nppiWarpPerspective_16u_C1R_Ctx( - static_cast(src.getData()), {int(src.getWidth()), int(src.getHeight())}, - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), {0, 0, int(src.getWidth()), int(src.getHeight())}, - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {0, 0, int(dst.getWidth()), int(dst.getHeight())}, coeffs, GetNppiInterpolationMode(type), - GetNppStreamContext(stream)); - - assert(status == NPP_SUCCESS); -} - -void WarpPerspective(Tensor &dst, const Tensor &src, const double coeffs[3][3], - 
InterpolationType type, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - - NppStatus status = nppiWarpPerspective_32f_C1R_Ctx( - static_cast(src.getData()), {int(src.getWidth()), int(src.getHeight())}, - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), {0, 0, int(src.getWidth()), int(src.getHeight())}, - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {0, 0, int(dst.getWidth()), int(dst.getHeight())}, coeffs, GetNppiInterpolationMode(type), - GetNppStreamContext(stream)); - - assert(status == NPP_SUCCESS); -} - -void WarpPerspective(Tensor &dst, const Tensor &src, const double coeffs[3][3], - InterpolationType type, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - - NppStatus status = nppiWarpPerspective_8u_C3R_Ctx( - static_cast(src.getData()), {int(src.getWidth()), int(src.getHeight())}, - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), {0, 0, int(src.getWidth()), int(src.getHeight())}, - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp8u), - {0, 0, int(dst.getWidth()), int(dst.getHeight())}, coeffs, GetNppiInterpolationMode(type), - GetNppStreamContext(stream)); - - assert(status == NPP_SUCCESS); -} - -void WarpPerspective(Tensor &dst, const Tensor &src, const double coeffs[3][3], - InterpolationType type, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - - NppStatus status = nppiWarpPerspective_16u_C3R_Ctx( - static_cast(src.getData()), {int(src.getWidth()), int(src.getHeight())}, - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), {0, 0, int(src.getWidth()), int(src.getHeight())}, - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp16u), - {0, 0, int(dst.getWidth()), int(dst.getHeight())}, coeffs, GetNppiInterpolationMode(type), - GetNppStreamContext(stream)); - - assert(status == NPP_SUCCESS); -} - -void WarpPerspective(Tensor &dst, const Tensor &src, const double coeffs[3][3], - InterpolationType type, cudaStream_t stream) -{ - // src and dst must be GPU tensors - assert(!src.isCPU() && !dst.isCPU()); - - NppStatus status = nppiWarpPerspective_32f_C3R_Ctx( - static_cast(src.getData()), {int(src.getWidth()), int(src.getHeight())}, - src.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), {0, 0, int(src.getWidth()), int(src.getHeight())}, - static_cast(dst.getData()), dst.getStride(TensorDimension::HEIGHT) * sizeof(Npp32f), - {0, 0, int(dst.getWidth()), int(dst.getHeight())}, coeffs, GetNppiInterpolationMode(type), - GetNppStreamContext(stream)); - - assert(status == NPP_SUCCESS); -} - -}} // namespace cvcore::tensor_ops \ No newline at end of file diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/NppUtils.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/NppUtils.cpp deleted file mode 100644 index 4c7cf7f..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/NppUtils.cpp +++ /dev/null @@ -1,116 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "NppUtils.h" - -#include -#include -#include -#include - -namespace cvcore { namespace tensor_ops { - -constexpr size_t CACHE_SIZE = 20; -static size_t timestamp = 0; -std::mutex lock; - -namespace { - -// This function involves GPU query and can be really slow -void SetupNppStreamContext(NppStreamContext &context, cudaStream_t stream) -{ - context.hStream = stream; - cudaError_t error = cudaGetDevice(&context.nCudaDeviceId); - if (error != cudaSuccess) - { - throw std::runtime_error("no devices supporting CUDA"); - } - error = cudaStreamGetFlags(context.hStream, &context.nStreamFlags); - if (error != cudaSuccess) - { - throw std::runtime_error("failed to get cuda stream flags"); - } - - cudaDeviceProp deviceProp; - error = cudaGetDeviceProperties(&deviceProp, context.nCudaDeviceId); - if (error != cudaSuccess) - { - throw std::runtime_error("no device properties"); - } - - context.nSharedMemPerBlock = deviceProp.sharedMemPerBlock; - context.nMaxThreadsPerBlock = deviceProp.maxThreadsPerBlock; - context.nMultiProcessorCount = deviceProp.multiProcessorCount; - context.nMaxThreadsPerMultiProcessor = deviceProp.maxThreadsPerMultiProcessor; - - // Refer - https://gitlab-master.nvidia.com/cv/core-modules/tensor_ops/-/merge_requests/48#note_6602087 - context.nReserved0 = 0; - - error = cudaDeviceGetAttribute(&(context.nCudaDevAttrComputeCapabilityMajor), cudaDevAttrComputeCapabilityMajor, - context.nCudaDeviceId); - if (error != cudaSuccess) - { - throw std::runtime_error("no device attribute - nCudaDevAttrComputeCapabilityMajor"); - } - - error = cudaDeviceGetAttribute(&(context.nCudaDevAttrComputeCapabilityMinor), cudaDevAttrComputeCapabilityMinor, - context.nCudaDeviceId); - if (error != cudaSuccess) - { - throw std::runtime_error("no device attribute - nCudaDevAttrComputeCapabilityMinor"); - } -} - -} // anonymous namespace - -struct Context -{ - NppStreamContext nppContext; - size_t time = 0; -}; - -NppStreamContext GetNppStreamContext(cudaStream_t stream) -{ - // Create a memory cache, all timestamp would be initialzed to 0 automatically - static std::array contextCache = {}; - - // Lock the thread - std::lock_guard guard(lock); - - size_t minTimestamp = contextCache[0].time; - size_t minIdx = 0; - for (size_t i = 0; i < CACHE_SIZE; i++) - { - auto &it = contextCache[i]; - if (it.time > 0 && it.nppContext.hStream == stream) - { - it.time = ++timestamp; - return it.nppContext; - } - if (it.time < minTimestamp) - { - minTimestamp = it.time; - minIdx = i; - } - } - auto &it = contextCache[minIdx]; - SetupNppStreamContext(it.nppContext, stream); - it.time = ++timestamp; - return it.nppContext; -} - -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/OneEuroFilter.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/OneEuroFilter.cpp deleted file mode 100644 index 35041b8..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/OneEuroFilter.cpp +++ /dev/null @@ -1,288 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & 
AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "cv/tensor_ops/OneEuroFilter.h" -#include "cv/core/MathTypes.h" -#include "cv/core/Traits.h" - -#include -#include -#include - -#ifdef NVBENCH_ENABLE -#include -#endif - -namespace cvcore { namespace tensor_ops { - -namespace { - -// 1/(2*PI) -constexpr float kOneOver2Pi = 0.15915494309189533577f; - -// Utilities to get template type from another template type -template -struct deduceDataType; -template -struct deduceDataType::value || std::is_same::value || - std::is_same::value>::type> -{ - typedef float U; -}; -template -struct deduceDataType::value || std::is_same::value || - std::is_same::value>::type> -{ - typedef double U; -}; - -} // namespace - -/* Low pass filter to apply exponential smoothing*/ -template -class LowPassfilter -{ -public: - LowPassfilter() - { - m_firstIteration = true; - } - - void resetState() - { - m_firstIteration = true; - } - - bool isInitialized() const - { - return !m_firstIteration; - } - - T getPreviousValue() const - { - return m_prevfilteredValue; - } - - std::error_code filter(T &outValue, T inValue, float alpha) - { -#ifdef NVBENCH_ENABLE - std::string funcName = "LowPassFilter_"; - std::string tag = funcName + typeid(T).name(); - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - if (m_firstIteration) - { - outValue = inValue; - m_firstIteration = false; - } - else - { - outValue = m_prevfilteredValue + (inValue - m_prevfilteredValue) * alpha; - } - m_prevRawValue = inValue; - m_prevfilteredValue = outValue; - return ErrorCode::SUCCESS; - } - -private: - bool m_firstIteration; - T m_prevRawValue; - T m_prevfilteredValue; -}; - -template -struct OneEuroFilterState -{ - // Computes alpha value for the filter - float getAlpha(float dataUpdateRate, float cutOffFreq) const - { - float alpha = cutOffFreq / (dataUpdateRate * kOneOver2Pi + cutOffFreq); - return alpha; - } - - // Resets the parameters and state of the filter - std::error_code resetParams(const OneEuroFilterParams &filterParams) - { - if (filterParams.dataUpdateRate <= 0.0f || filterParams.minCutoffFreq <= 0 || filterParams.derivCutoffFreq <= 0) - { - return ErrorCode::INVALID_ARGUMENT; - } - m_freq = filterParams.dataUpdateRate; - m_mincutoff = filterParams.minCutoffFreq; - m_cutOffSlope = filterParams.cutoffSlope; - m_derivCutOff = filterParams.derivCutoffFreq; - m_alphadxFilt = getAlpha(m_freq, filterParams.derivCutoffFreq); - - xFilt->resetState(); - dxFilt->resetState(); - - m_currfilteredValue = 0.0f; - m_prevfilteredValue = m_currfilteredValue; - return ErrorCode::SUCCESS; - } - - // Constructor for each filter state - OneEuroFilterState(const OneEuroFilterParams &filterParams) - { - xFilt.reset(new LowPassfilter()); - dxFilt.reset(new LowPassfilter()); - auto err = resetParams(filterParams); - if (err != make_error_code(ErrorCode::SUCCESS)) - { - throw err; - } - } - - std::error_code filter(U 
&outValue, U value) - { -#ifdef NVBENCH_ENABLE - std::string funcName = "OneEuroFilterState_"; - std::string tag = funcName + typeid(U).name(); - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - m_prevfilteredValue = m_currfilteredValue; - U dxValue = xFilt->isInitialized() ? (value - xFilt->getPreviousValue()) * m_freq : 0.0f; - U edxValue; - auto err = dxFilt->filter(edxValue, dxValue, m_alphadxFilt); - if (err != make_error_code(ErrorCode::SUCCESS)) - { - return err; - } - // Update the new cutoff frequency - U newCutoff = m_mincutoff + m_cutOffSlope * fabsf(edxValue); - float newAlpha = getAlpha(m_freq, newCutoff); - err = xFilt->filter(m_currfilteredValue, value, newAlpha); - if (err != make_error_code(ErrorCode::SUCCESS)) - { - return err; - } - - outValue = m_currfilteredValue; - return ErrorCode::SUCCESS; - } - std::unique_ptr> xFilt; - std::unique_ptr> dxFilt; - float m_alphadxFilt; - float m_freq; - float m_mincutoff; - float m_cutOffSlope; - float m_derivCutOff; - U m_prevfilteredValue; - U m_currfilteredValue; -}; - -template -struct OneEuroFilter::OneEuroFilterImpl -{ - typedef typename deduceDataType::U DT; - OneEuroFilterImpl(const OneEuroFilterParams &filterParams) - { - size_t numStates = traits::get_dim::value; - m_states.resize(numStates); - for (size_t i = 0; i < m_states.size(); i++) - { - m_states[i].reset(new OneEuroFilterState
(filterParams)); - } - } - - std::error_code resetParams(const OneEuroFilterParams &filterParams) - { - std::error_code err = ErrorCode::SUCCESS; - for (size_t i = 0; i < m_states.size(); i++) - { - err = m_states[i]->resetParams(filterParams); - if (err != make_error_code(ErrorCode::SUCCESS)) - { - return err; - } - } - return ErrorCode::SUCCESS; - } - - ~OneEuroFilterImpl() {} - - template::value == 1>::type * = nullptr> - std::error_code filter(U &outValue, U value) - { - if (m_states.size() != 1) - { - return ErrorCode::INVALID_OPERATION; - } - std::error_code err = m_states[0]->filter(outValue, value); - return err; - } - - template::value != 1>::type * = nullptr> - std::error_code filter(U &outValue, U value) - { - if (m_states.size() <= 1) - { - return ErrorCode::INVALID_OPERATION; - } - std::error_code err = ErrorCode::SUCCESS; - for (size_t i = 0; i < m_states.size(); i++) - { - err = m_states[i]->filter(outValue[i], value[i]); - if (err != make_error_code(ErrorCode::SUCCESS)) - { - return err; - } - } - - return err; - } - - std::vector>> m_states; -}; - -template -OneEuroFilter::OneEuroFilter(const OneEuroFilterParams &filterParams) - : m_pImpl(new OneEuroFilterImpl(filterParams)) -{ -} - -template -OneEuroFilter::~OneEuroFilter() -{ -} - -template -std::error_code OneEuroFilter::resetParams(const OneEuroFilterParams &filterParams) -{ - auto err = m_pImpl->resetParams(filterParams); - return err; -} - -template -std::error_code OneEuroFilter::execute(T &filteredValue, T inValue) -{ -#ifdef NVBENCH_ENABLE - std::string funcName = "OneEuroFilter_"; - std::string tag = funcName + typeid(T).name(); - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - auto err = m_pImpl->filter(filteredValue, inValue); - return err; -} - -template class OneEuroFilter; -template class OneEuroFilter; -template class OneEuroFilter; -template class OneEuroFilter; -template class OneEuroFilter; -template class OneEuroFilter; -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/TensorOperators.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/TensorOperators.cpp deleted file mode 100644 index 02d34ca..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/TensorOperators.cpp +++ /dev/null @@ -1,116 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -#include "cv/tensor_ops/TensorOperators.h" - -#include - -#ifdef ENABLE_VPI -#include "vpi/VPITensorOperators.h" -#endif - -namespace cvcore { namespace tensor_ops { - -typename TensorContextFactory::MultitonType TensorContextFactory::instances; -std::mutex TensorContextFactory::instanceMutex; - -std::error_code TensorContextFactory::CreateContext(TensorOperatorContext &tensorContext, TensorBackend backend) -{ - using PairType = typename TensorContextFactory::MultitonType::mapped_type; - using CounterType = typename PairType::first_type; - using ValuePtrType = typename PairType::second_type; - - std::lock_guard instanceLock(instanceMutex); - - std::error_code result = ErrorCode::SUCCESS; - - tensorContext = nullptr; - - auto contextItr = instances.find(backend); - if (contextItr == instances.end() && IsBackendSupported(backend)) - { - switch (backend) - { - case TensorBackend::VPI: -#ifdef ENABLE_VPI - try - { - instances[backend] = std::make_pair(1, ValuePtrType(new VPITensorContext{})); - } - catch (std::error_code &e) - { - result = e; - } - catch (...) - { - result = ErrorCode::INVALID_OPERATION; - } -#else // _WIN32 - result = ErrorCode::NOT_IMPLEMENTED; -#endif // _WIN32 - break; - default: - result = ErrorCode::NOT_IMPLEMENTED; - break; - } - tensorContext = instances[backend].second.get(); - } - else - { - contextItr->second.first++; - tensorContext = contextItr->second.second.get(); - } - - return result; -} - -std::error_code TensorContextFactory::DestroyContext(TensorOperatorContext &context) -{ - std::lock_guard instanceLock(instanceMutex); - - auto backend = context->Backend(); - context = nullptr; - auto contextItr = instances.find(backend); - if (contextItr != instances.end()) - { - contextItr->second.first--; - if (contextItr->second.first == 0) - { - instances.erase(backend); - } - } - return ErrorCode::SUCCESS; -} - -bool TensorContextFactory::IsBackendSupported(TensorBackend backend) -{ - bool result = false; - - switch (backend) - { - case TensorBackend::VPI: -#ifdef ENABLE_VPI - result = true; -#endif // _WIN32 - break; - default: - break; - } - - return result; -} -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIColorConvertImpl.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIColorConvertImpl.cpp deleted file mode 100644 index 693ca5c..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIColorConvertImpl.cpp +++ /dev/null @@ -1,135 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
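// ---------------------------------------------------------------------------
// [Illustrative sketch added for clarity; not part of the original patch.]
// The TensorContextFactory removed above hands out one shared context per
// backend and reference-counts it: the first CreateContext() call constructs
// the backend context, later calls only bump the use count, and
// DestroyContext() releases the context once the count drops to zero. A
// generic, self-contained version of that pattern, using a placeholder
// Context/Backend pair rather than the cvcore types, might look like this:
// ---------------------------------------------------------------------------
#include <map>
#include <memory>
#include <mutex>

enum class Backend { VPI };

struct Context { /* backend-specific state would live here */ };

class ContextFactory
{
public:
    static Context *Acquire(Backend backend)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        auto it = instances_.find(backend);
        if (it == instances_.end())
        {
            // First user of this backend: create the shared context.
            it = instances_.emplace(backend, std::make_pair(0, std::make_unique<Context>())).first;
        }
        it->second.first++;  // one more user of the shared context
        return it->second.second.get();
    }

    static void Release(Backend backend)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        auto it = instances_.find(backend);
        if (it != instances_.end() && --it->second.first == 0)
        {
            instances_.erase(it);  // last user gone, tear the context down
        }
    }

private:
    static std::map<Backend, std::pair<int, std::unique_ptr<Context>>> instances_;
    static std::mutex mutex_;
};

std::map<Backend, std::pair<int, std::unique_ptr<Context>>> ContextFactory::instances_;
std::mutex ContextFactory::mutex_;
// ---------------------------------------------------------------------------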
-// -// SPDX-License-Identifier: Apache-2.0 -#include -#include -#include -#include - -#include "cv/core/CameraModel.h" -#include "cv/core/Image.h" -#include "cv/core/Memory.h" -#include "cv/core/ProfileUtils.h" -#include "cv/core/Tensor.h" -#include "cv/tensor_ops/ImageUtils.h" - -#include "VPIColorConvertImpl.h" -#include "VPIEnumMapping.h" -#include "VPIStatusMapping.h" - -// VPI includes -#include -#include -#include -#include - -#ifdef NVBENCH_ENABLE -#include -#endif - -namespace cvcore { namespace tensor_ops { - -VPITensorStream::VPIColorConvertImpl::VPIColorConvertImpl() - : m_inputImage(nullptr) - , m_outputImage(nullptr) -{ - std::memset(reinterpret_cast(&m_inputImageData), 0, sizeof(VPIImageData)); - std::memset(reinterpret_cast(&m_outputImageData), 0, sizeof(VPIImageData)); -} - -template -std::error_code VPITensorStream::VPIColorConvertImpl::execute(Image &outputImage, const Image &inputImage, - VPIStream &stream, VPIBackend backend) -{ - std::error_code errCode = make_error_code(VPIStatus::VPI_SUCCESS); - - bool paramsChanged = m_inputImage == nullptr || m_outputImage == nullptr || - CheckParamsChanged(m_inputImageData, inputImage) || - CheckParamsChanged(m_outputImageData, outputImage); - if (paramsChanged) - { - DestroyVPIImageWrapper(m_inputImage, m_inputImageData); - DestroyVPIImageWrapper(m_outputImage, m_outputImageData); - errCode = CreateVPIImageWrapper(m_inputImage, m_inputImageData, inputImage, backend); - if (errCode == make_error_code(VPI_SUCCESS)) - { - errCode = CreateVPIImageWrapper(m_outputImage, m_outputImageData, outputImage, backend); - } - } - - if (errCode == make_error_code(VPIStatus::VPI_SUCCESS)) - { - errCode = UpdateImage(m_inputImage, m_inputImageData, inputImage); - } - if (errCode == make_error_code(VPIStatus::VPI_SUCCESS)) - { - errCode = UpdateImage(m_outputImage, m_outputImageData, outputImage); - } - - if (errCode == make_error_code(VPIStatus::VPI_SUCCESS)) - { -#ifdef NVBENCH_ENABLE - std::string tag = "VPIColorConvert_" + GetMemoryTypeAsString(inputImage.isCPU()) +"Input_" + GetMemoryTypeAsString(outputImage.isCPU()) +"Output_" + getVPIBackendString(backend) + "Backend"; - nv::bench::Timer timerFunc = - nv::bench::VPI(tag.c_str(), nv::bench::Flag::DEFAULT, stream); -#endif - errCode = make_error_code(vpiSubmitConvertImageFormat(stream, backend, m_inputImage, m_outputImage, nullptr)); - } - - if (errCode == make_error_code(VPIStatus::VPI_SUCCESS)) - { - errCode = make_error_code(vpiStreamSync(stream)); - } - - if (errCode != make_error_code(VPIStatus::VPI_SUCCESS)) - { - return errCode; - } - - return make_error_code(ErrorCode::SUCCESS); -} - -VPITensorStream::VPIColorConvertImpl::~VPIColorConvertImpl() -{ - // Destroy Input VPIImage - DestroyVPIImageWrapper(m_inputImage, m_inputImageData); - - // Destroy Output VPIImage - DestroyVPIImageWrapper(m_outputImage, m_outputImageData); -} - -template std::error_code VPITensorStream::VPIColorConvertImpl::execute(Image &, const Image &, - VPIStream &, VPIBackend); -template std::error_code VPITensorStream::VPIColorConvertImpl::execute(Image &, const Image &, - VPIStream &, VPIBackend); -template std::error_code VPITensorStream::VPIColorConvertImpl::execute(Image &, const Image &, - VPIStream &, VPIBackend); -template std::error_code VPITensorStream::VPIColorConvertImpl::execute(Image &, const Image &, - VPIStream &, VPIBackend); -template std::error_code VPITensorStream::VPIColorConvertImpl::execute(Image &, const Image &, - VPIStream &, VPIBackend); -template std::error_code 
VPITensorStream::VPIColorConvertImpl::execute(Image &, const Image &, - VPIStream &, VPIBackend); -template std::error_code VPITensorStream::VPIColorConvertImpl::execute(Image &, const Image &, - VPIStream &, VPIBackend); -template std::error_code VPITensorStream::VPIColorConvertImpl::execute(Image &, const Image &, - VPIStream &, VPIBackend); -template std::error_code VPITensorStream::VPIColorConvertImpl::execute(Image &, const Image &, - VPIStream &, VPIBackend); -template std::error_code VPITensorStream::VPIColorConvertImpl::execute(Image &, const Image &, - VPIStream &, VPIBackend); - -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIColorConvertImpl.h b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIColorConvertImpl.h deleted file mode 100644 index 0e71266..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIColorConvertImpl.h +++ /dev/null @@ -1,65 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_VPI_COLOR_CONVERT_IMPL_H -#define CVCORE_VPI_COLOR_CONVERT_IMPL_H - -#include "VPITensorOperators.h" -#include "cv/tensor_ops/ITensorOperatorStream.h" -#include "cv/tensor_ops/ImageUtils.h" - -#include - -namespace cvcore { namespace tensor_ops { - -/** - * Color convert implementation for VPI backend. - */ -class VPITensorStream::VPIColorConvertImpl -{ -public: - /** - * Image color conversion constructor. - */ - VPIColorConvertImpl(); - - /** - * Image color conversion a given image type. - * @param outputImage Output image. - * @param inputImage Input image. - * @param stream specified VPI stream. - * @param backend specified VPI backend. - */ - template - std::error_code execute(Image &outputImage, const Image &inputImage, VPIStream &stream, - VPIBackend backend); - - /** - * Image color conversion destroy function to deallocate resources. - */ - ~VPIColorConvertImpl(); - -private: - VPIImage m_inputImage; - VPIImage m_outputImage; - VPIImageData m_inputImageData; - VPIImageData m_outputImageData; -}; - -}} // namespace cvcore::tensor_ops - -#endif //CVCORE_VPI_COLOR_CONVERT_IMPL_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIEnumMapping.h b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIEnumMapping.h deleted file mode 100644 index d611167..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIEnumMapping.h +++ /dev/null @@ -1,196 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_VPIENUMMAPPING_H -#define CVCORE_VPIENUMMAPPING_H - -#include - -#include "VPITensorOperators.h" - -namespace cvcore { namespace tensor_ops { - -constexpr VPIBackend ToVpiBackendType(const ComputeEngine &computeEngine) -{ - switch (computeEngine) - { - case ComputeEngine::CPU: - return VPIBackend::VPI_BACKEND_CPU; - case ComputeEngine::PVA: - return VPIBackend::VPI_BACKEND_PVA; - case ComputeEngine::GPU: - return VPIBackend::VPI_BACKEND_CUDA; - case ComputeEngine::VIC: - return VPIBackend::VPI_BACKEND_VIC; - case ComputeEngine::NVENC: - return VPIBackend::VPI_BACKEND_NVENC; - default: - return VPIBackend::VPI_BACKEND_INVALID; - } -} - -constexpr VPIInterpolationType ToVpiInterpolationType(InterpolationType value) -{ - VPIInterpolationType result = VPI_INTERP_NEAREST; - - switch (value) - { - case INTERP_NEAREST: - result = VPI_INTERP_NEAREST; - break; - case INTERP_LINEAR: - result = VPI_INTERP_LINEAR; - break; - case INTERP_CUBIC_CATMULLROM: - result = VPI_INTERP_CATMULL_ROM; - break; - default: - break; - } - - return result; -} - -constexpr VPIBorderExtension ToVpiBorderType(BorderType value) -{ - VPIBorderExtension result = VPI_BORDER_ZERO; - - switch (value) - { - case BORDER_ZERO: - result = VPI_BORDER_ZERO; - break; - case BORDER_REPEAT: - result = VPI_BORDER_CLAMP; - break; - case BORDER_REVERSE: - result = VPI_BORDER_REFLECT; - break; - case BORDER_MIRROR: - result = VPI_BORDER_MIRROR; - break; - default: - break; - } - - return result; -} - -constexpr VPIImageFormat ToVpiImageFormat(ImageType value) -{ - VPIImageFormat result = VPI_IMAGE_FORMAT_Y8_ER; - - switch (value) - { - case Y_U8: - result = VPI_IMAGE_FORMAT_Y8_ER; - break; - case Y_U16: - result = VPI_IMAGE_FORMAT_Y16_ER; - break; - case Y_S8: - result = VPI_IMAGE_FORMAT_S8; - break; - case Y_S16: - result = VPI_IMAGE_FORMAT_S16; - break; - case Y_F32: - result = VPI_IMAGE_FORMAT_F32; - break; - case RGB_U8: - result = VPI_IMAGE_FORMAT_RGB8; - break; - case BGR_U8: - result = VPI_IMAGE_FORMAT_BGR8; - break; - case RGBA_U8: - result = VPI_IMAGE_FORMAT_RGBA8; - break; - case NV12: - result = VPI_IMAGE_FORMAT_NV12_ER; - break; - case NV24: - result = VPI_IMAGE_FORMAT_NV24_ER; - break; - default: - break; - } - - return result; -} - -constexpr VPIPixelType ToVpiPixelType(ImageType value) -{ - VPIPixelType result = VPI_PIXEL_TYPE_U8; - - switch (value) - { - case Y_U8: - result = VPI_PIXEL_TYPE_U8; - break; - case Y_U16: - result = VPI_PIXEL_TYPE_U16; - break; - case Y_S8: - result = VPI_PIXEL_TYPE_S8; - break; - case Y_S16: - result = VPI_PIXEL_TYPE_S16; - break; - case Y_F32: - result = VPI_PIXEL_TYPE_F32; - break; - case RGB_U8: - result = VPI_PIXEL_TYPE_3U8; - break; - case BGR_U8: - result = VPI_PIXEL_TYPE_3U8; - break; - case RGBA_U8: - result = VPI_PIXEL_TYPE_4U8; - break; - default: - break; - } - - return result; -} - -static inline std::string getVPIBackendString(VPIBackend vpiBackend) -{ - switch (vpiBackend) - { - case VPIBackend::VPI_BACKEND_CPU: - return "CPU"; - case VPIBackend::VPI_BACKEND_CUDA: - return "GPU"; - case 
VPIBackend::VPI_BACKEND_VIC: - return "VIC"; - case VPIBackend::VPI_BACKEND_PVA: - return "PVA"; - case VPIBackend::VPI_BACKEND_NVENC: - return "NVENC"; - case VPIBackend::VPI_BACKEND_INVALID: - return "INVALID"; - default: - return "INVALID"; - } -} - -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_VPIENUMMAPPING_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIRemapImpl.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIRemapImpl.cpp deleted file mode 100644 index cb544fd..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIRemapImpl.cpp +++ /dev/null @@ -1,160 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include -#include - -#include "VPIRemapImpl.h" -#include "VPIEnumMapping.h" -#include "VPIStatusMapping.h" - -#include "cv/core/CameraModel.h" -#include "cv/core/Image.h" - -#include -#include - -#ifdef NVBENCH_ENABLE -#include -#endif - -namespace cvcore { namespace tensor_ops { - -VPITensorStream::VPIRemapImpl::VPIRemapImpl() - : m_inputImage(nullptr) - , m_outputImage(nullptr) -{ - std::memset(reinterpret_cast(&m_inputImageData), 0, sizeof(VPIImageData)); - std::memset(reinterpret_cast(&m_outputImageData), 0, sizeof(VPIImageData)); -} - -template -std::error_code VPITensorStream::VPIRemapImpl::initialize(Image & outImage, - const Image & inImage, - VPIBackend backend) -{ - std::error_code status; - status = CreateVPIImageWrapper(m_inputImage, m_inputImageData, inImage, backend); - if(status == make_error_code(VPI_SUCCESS)) - { - status = CreateVPIImageWrapper(m_outputImage, m_outputImageData, outImage, backend); - } - - return status; -} -template std::error_code VPITensorStream::VPIRemapImpl::initialize(Image & outImage, - const Image & inImage, VPIBackend); -template std::error_code VPITensorStream::VPIRemapImpl::initialize(Image & outImage, - const Image & inImage, VPIBackend); -template std::error_code VPITensorStream::VPIRemapImpl::initialize(Image & outImage, - const Image & inImage, VPIBackend); -template std::error_code VPITensorStream::VPIRemapImpl::initialize(Image & outImage, - const Image & inImage, VPIBackend); - -// ----------------------------------------------------------------------------- - -template -std::error_code VPITensorStream::VPIRemapImpl::execute(Image & outImage, - const Image & inImage, - const VPIImageWarp * warp, - InterpolationType interpolation, - BorderType border, - VPIStream & stream, - VPIBackend backend) -{ - std::error_code status = make_error_code(VPI_SUCCESS); - VPIInterpolationType vpiInterpolationType = ToVpiInterpolationType(interpolation); - VPIBorderExtension vpiBorderExt = ToVpiBorderType(border); - - bool paramsChanged = m_inputImage == nullptr || m_outputImage == nullptr || - CheckParamsChanged(m_inputImageData, inImage) || - CheckParamsChanged(m_outputImageData, outImage); - - 
if(paramsChanged) - { - DestroyVPIImageWrapper(m_inputImage, m_inputImageData); - DestroyVPIImageWrapper(m_outputImage, m_outputImageData); - status = initialize(outImage, inImage, backend); - } - - if(status == make_error_code(VPI_SUCCESS)) - { - status = UpdateImage(m_inputImage, m_inputImageData, inImage); - } - - if(status == make_error_code(VPI_SUCCESS)) - { - status = UpdateImage(m_outputImage, m_outputImageData, outImage); - } - - if(status == make_error_code(VPI_SUCCESS)) - { -#ifdef NVBENCH_ENABLE - std::string tag = "VPISubmitRemap_" + GetMemoryTypeAsString(inImage.isCPU()) +"Input_" + GetMemoryTypeAsString(outImage.isCPU()) +"Output_" + getVPIBackendString(backend) + "Backend"; - nv::bench::Timer timerFunc = - nv::bench::VPI(tag.c_str(), nv::bench::Flag::DEFAULT, stream); -#endif - // Submit remap task for Lens Distortion Correction - status = make_error_code(vpiSubmitRemap(stream, backend, warp->payload, - m_inputImage, m_outputImage, vpiInterpolationType, vpiBorderExt, 0)); - } - - if(status == make_error_code(VPI_SUCCESS)) - { - // Wait for remap to complete - status = make_error_code(vpiStreamSync(stream)); - } - return status; -} -template std::error_code VPITensorStream::VPIRemapImpl::execute(Image & outImage, - const Image & inImage, - const VPIImageWarp * warp, - InterpolationType interpolation, - BorderType border, - VPIStream & stream, - VPIBackend backend); -template std::error_code VPITensorStream::VPIRemapImpl::execute(Image & outImage, - const Image & inImage, - const VPIImageWarp * warp, - InterpolationType interpolation, - BorderType border, - VPIStream & stream, - VPIBackend backend); -template std::error_code VPITensorStream::VPIRemapImpl::execute(Image & outImage, - const Image & inImage, - const VPIImageWarp * warp, - InterpolationType interpolation, - BorderType border, - VPIStream & stream, - VPIBackend backend); -template std::error_code VPITensorStream::VPIRemapImpl::execute(Image & outImage, - const Image & inImage, - const VPIImageWarp * warp, - InterpolationType interpolation, - BorderType border, - VPIStream & stream, - VPIBackend backend); -// ----------------------------------------------------------------------------- - -VPITensorStream::VPIRemapImpl::~VPIRemapImpl() -{ - DestroyVPIImageWrapper(m_inputImage, m_inputImageData); - DestroyVPIImageWrapper(m_outputImage, m_outputImageData); -} -// ----------------------------------------------------------------------------- - -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIRemapImpl.h b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIRemapImpl.h deleted file mode 100644 index e129f04..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIRemapImpl.h +++ /dev/null @@ -1,82 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_VPIREMAPIMPL_H -#define CVCORE_VPIREMAPIMPL_H - -#include -#include - -#include "VPITensorOperators.h" -#include "VPIImageWarp.h" - -namespace cvcore { namespace tensor_ops { -/** - * Remap implementation used for Lens Distortion. - */ -class VPITensorStream::VPIRemapImpl -{ - public: - /* VPIRemapImpl constructor */ - VPIRemapImpl(); - - /** - * Remap Intialization. - * @param outputImage Remap output image of Type - * @param inputImage Remap input image of Type - * @param backend Compute backend - * @return Success if intialization is done successfully, otherwise error is returned - */ - template - std::error_code initialize(Image & outImage, - const Image & inImage, - VPIBackend backend); - - /** - * Remap execution function(non-blocking) - * Application is reqiured to call Sync() before accessing the generated Image. - * @param outImage Remap output image of type NV12 - * @param inImage Remap input image of type NV12 - * @param warp Remap warp pointer - * @param interpolation Interpolation type used for remap - * @param border Border type used for remap - * @param stream VPI stream used for remap - * @param backend VPI backend used for remap - * @return Success if remap is submitted successfully, otherwise error is returned - */ - template - std::error_code execute(Image & outImage, - const Image & inImage, - const VPIImageWarp * warp, - InterpolationType interpolation, - BorderType border, - VPIStream & stream, - VPIBackend backend); - - /* VPIRemapImpl destructor to release resources */ - ~VPIRemapImpl(); - - private: - VPIImage m_inputImage; - VPIImage m_outputImage; - VPIImageData m_inputImageData; - VPIImageData m_outputImageData; -}; - -}} // namespace cvcore::tensor_ops - -#endif //CVCORE_VPIREMAPIMPL_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIResizeImpl.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIResizeImpl.cpp deleted file mode 100644 index 1adffa3..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIResizeImpl.cpp +++ /dev/null @@ -1,139 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 -#include "VPIResizeImpl.h" - -#include -#include -#include -#include - -// VPI includes -#include -#include -#include -#include -#include - -#include "VPIEnumMapping.h" -#include "VPIStatusMapping.h" -#include "cv/core/CameraModel.h" -#include "cv/core/Image.h" - -#ifdef NVBENCH_ENABLE -#include -#endif - -namespace cvcore { namespace tensor_ops { - -template -std::error_code VPITensorStream::VPIResizeImpl::execute(Image &outputImage, const Image &inputImage, - InterpolationType interpolation, BorderType border, - VPIStream &stream, VPIBackend backend) -{ - std::error_code errCode = ErrorCode::SUCCESS; - VPIStatus status = VPIStatus::VPI_SUCCESS; - VPIInterpolationType interpType; - VPIBorderExtension borderExt; - interpType = ToVpiInterpolationType(interpolation); - borderExt = ToVpiBorderType(border); - - bool paramsChanged = m_inputImage == nullptr || m_outputImage == nullptr || - CheckParamsChanged(m_inputImageData, inputImage) || - CheckParamsChanged(m_outputImageData, outputImage); - if (paramsChanged) - { - DestroyVPIImageWrapper(m_inputImage, m_inputImageData); - DestroyVPIImageWrapper(m_outputImage, m_outputImageData); - errCode = CreateVPIImageWrapper(m_inputImage, m_inputImageData, inputImage, backend); - if (errCode == make_error_code(VPI_SUCCESS)) - { - errCode = CreateVPIImageWrapper(m_outputImage, m_outputImageData, outputImage, backend); - } - } - else - { - - errCode = UpdateImage(m_inputImage, m_inputImageData, inputImage); - if (errCode == make_error_code(VPIStatus::VPI_SUCCESS)) - { - errCode = UpdateImage(m_outputImage, m_outputImageData, outputImage); - } - } - - if (status == VPIStatus::VPI_SUCCESS) - { -#ifdef NVBENCH_ENABLE - std::string tag = "VPISubmitRescale_" + GetMemoryTypeAsString(inputImage.isCPU()) +"Input_" + GetMemoryTypeAsString(outputImage.isCPU()) +"Output_" + getVPIBackendString(backend) + "Backend"; - nv::bench::Timer timerFunc = - nv::bench::VPI(tag.c_str(), nv::bench::Flag::DEFAULT, stream); -#endif - // Resize - status = vpiSubmitRescale(stream, backend, m_inputImage, m_outputImage, interpType, borderExt, 0); - } - - if (status == VPIStatus::VPI_SUCCESS) - { - status = vpiStreamSync(stream); - } - - if (status != VPIStatus::VPI_SUCCESS) - { - return make_error_code(status); - } - return make_error_code(ErrorCode::SUCCESS); -} - -VPITensorStream::VPIResizeImpl::VPIResizeImpl() - : m_inputImage(nullptr) - , m_outputImage(nullptr) -{ - std::memset(reinterpret_cast(&m_inputImageData), 0, sizeof(VPIImageData)); - std::memset(reinterpret_cast(&m_outputImageData), 0, sizeof(VPIImageData)); -} - -/** -* Image resizing destroy function to deallocate resources. 
-*/ -VPITensorStream::VPIResizeImpl::~VPIResizeImpl() -{ - // Destroy Input VPIImage - DestroyVPIImageWrapper(m_inputImage, m_inputImageData); - // Destroy Output VPIImage - DestroyVPIImageWrapper(m_outputImage, m_outputImageData); -} - -template std::error_code VPITensorStream::VPIResizeImpl::execute(Image &outputImage, - const Image &inputImage, - InterpolationType interpolation, BorderType border, - VPIStream &stream, VPIBackend backend); -template std::error_code VPITensorStream::VPIResizeImpl::execute(Image &outputImage, - const Image &inputImage, - InterpolationType interpolation, BorderType border, - VPIStream &stream, VPIBackend backend); -template std::error_code VPITensorStream::VPIResizeImpl::execute(Image &outputImage, - const Image &inputImage, - InterpolationType interpolation, BorderType border, - VPIStream &stream, VPIBackend backend); -template std::error_code VPITensorStream::VPIResizeImpl::execute(Image &outputImage, - const Image &inputImage, - InterpolationType interpolation, BorderType border, - VPIStream &stream, VPIBackend backend); -template std::error_code VPITensorStream::VPIResizeImpl::execute(Image &outputImage, - const Image &inputImage, - InterpolationType interpolation, BorderType border, - VPIStream &stream, VPIBackend backend); -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIResizeImpl.h b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIResizeImpl.h deleted file mode 100644 index ec20a45..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIResizeImpl.h +++ /dev/null @@ -1,66 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_VPIRESIZEIMPL_H -#define CVCORE_VPIRESIZEIMPL_H - -#include "VPITensorOperators.h" - -#include - -#include "cv/tensor_ops/ITensorOperatorStream.h" -#include "cv/tensor_ops/ImageUtils.h" - -namespace cvcore { namespace tensor_ops { - -/** - * Remap implementation used for Lens Distortion. - */ -class VPITensorStream::VPIResizeImpl -{ -public: - /** - * Initialization for Image resizing. - */ - VPIResizeImpl(); - - /** - * Image resizing a given image type. - * @param outputImage Resize output image of type .RGB_U8 - * @param inputImage Resize input image of type RGB_U8. - * @param type Interpolation type used for resize. - * @param border Image border extension used for resize - */ - template - std::error_code execute(Image &outputImage, const Image &inputImage, InterpolationType interpolation, - BorderType border, VPIStream &stream, VPIBackend backend); - - /** - * Image resizing destroy function to deallocate resources. 
- */ - ~VPIResizeImpl(); - -private: - VPIImage m_inputImage; - VPIImage m_outputImage; - VPIImageData m_inputImageData; - VPIImageData m_outputImageData; -}; - -}} // namespace cvcore::tensor_ops - -#endif //CVCORE_VPIRESIZEIMPL_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStatusMapping.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStatusMapping.cpp deleted file mode 100644 index a43431d..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStatusMapping.cpp +++ /dev/null @@ -1,122 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "cv/tensor_ops/Errors.h" -#include "vpi/Status.h" - -#ifndef __cpp_lib_to_underlying -// Using a C++23 feature by hacking std -namespace std -{ - template - constexpr underlying_type_t to_underlying(Enum e) noexcept - { - return static_cast>(e); - } -}; -#endif // __cpp_lib_to_underlying - -namespace cvcore { namespace tensor_ops { - -namespace detail -{ - struct VPIStatusCategory : std::error_category - { - virtual const char * name() const noexcept override final - { - return "vpi-status"; - } - - virtual std::string message(int value) const override final - { - std::string result = "VPI Status"; - - return result; - } - - virtual std::error_condition default_error_condition(int code) const noexcept override final - { - std::error_condition result; - - switch(code) - { - case VPI_SUCCESS: - result = ErrorCode::SUCCESS; - break; - - case VPI_ERROR_INVALID_ARGUMENT: - result = ErrorCode::INVALID_ARGUMENT; - break; - - case VPI_ERROR_INVALID_IMAGE_FORMAT: - result = ErrorCode::INVALID_IMAGE_FORMAT; - break; - - case VPI_ERROR_INVALID_ARRAY_TYPE: - result = ErrorCode::INVALID_STORAGE_TYPE; - break; - - case VPI_ERROR_INVALID_PAYLOAD_TYPE: - result = ErrorCode::INVALID_STORAGE_TYPE; - break; - - case VPI_ERROR_INVALID_OPERATION: - result = ErrorCode::INVALID_OPERATION; - break; - - case VPI_ERROR_INVALID_CONTEXT: - result = ErrorCode::INVALID_ENGINE_TYPE; - break; - - case VPI_ERROR_DEVICE: - result = ErrorCode::DEVICE_ERROR; - break; - - case VPI_ERROR_NOT_READY: - result = ErrorCode::NOT_READY; - break; - - case VPI_ERROR_BUFFER_LOCKED: - result = ErrorCode::SYSTEM_ERROR; - break; - - case VPI_ERROR_OUT_OF_MEMORY: - result = ErrorCode::OUT_OF_MEMORY; - break; - - case VPI_ERROR_INTERNAL: - result = ErrorCode::SYSTEM_ERROR; - break; - - default: - result = ErrorCode::NOT_IMPLEMENTED; - break; - } - - return result; - } - }; -} // namespace detail - -const detail::VPIStatusCategory errorCategory{}; - -std::error_code make_error_code(VPIStatus ec) noexcept -{ - return {std::to_underlying(ec), errorCategory}; -} - -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStatusMapping.h b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStatusMapping.h deleted file mode 
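// ---------------------------------------------------------------------------
// [Illustrative sketch added for clarity; not part of the original patch.]
// VPIStatusMapping.cpp above bridges VPI's C status codes into std::error_code
// through a custom std::error_category, and the matching header (removed next)
// enables the implicit conversion by specializing std::is_error_code_enum. A
// compact, self-contained illustration of the same idiom, using a placeholder
// DemoStatus enum rather than VPIStatus, could look like this:
// ---------------------------------------------------------------------------
#include <string>
#include <system_error>

enum class DemoStatus { Success = 0, InvalidArgument = 1, OutOfMemory = 2 };

namespace std
{
template <>
struct is_error_code_enum<DemoStatus> : true_type
{
};
}  // namespace std

namespace
{
struct DemoStatusCategory : std::error_category
{
    const char *name() const noexcept override
    {
        return "demo-status";
    }
    std::string message(int value) const override
    {
        switch (static_cast<DemoStatus>(value))
        {
        case DemoStatus::Success: return "success";
        case DemoStatus::InvalidArgument: return "invalid argument";
        case DemoStatus::OutOfMemory: return "out of memory";
        default: return "unknown status";
        }
    }
};
const DemoStatusCategory demoStatusCategory{};
}  // namespace

std::error_code make_error_code(DemoStatus status) noexcept
{
    return {static_cast<int>(status), demoStatusCategory};
}

// With the specialization in place, a DemoStatus converts implicitly,
// e.g.:  std::error_code ec = DemoStatus::OutOfMemory;
// ---------------------------------------------------------------------------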
100644 index 961d4fc..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStatusMapping.h +++ /dev/null @@ -1,38 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_VPISTATUSMAPPING_H -#define CVCORE_VPISTATUSMAPPING_H - -#include "cv/core/CVError.h" -#include "vpi/Status.h" - -// WARNING: Extending base C++ namespace to cover cvcore error codes -namespace std { - -template <> -struct is_error_code_enum : true_type {}; - -} // namespace std - -namespace cvcore { namespace tensor_ops { - -std::error_code make_error_code(VPIStatus) noexcept; - -}} // namespace cvcore::tensor_ops - -#endif //CVCORE_VPISTATUSMAPPING_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStereoDisparityEstimatorImpl.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStereoDisparityEstimatorImpl.cpp deleted file mode 100644 index c4e96e4..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStereoDisparityEstimatorImpl.cpp +++ /dev/null @@ -1,211 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include -#include - -#include "VPIStereoDisparityEstimatorImpl.h" -#include "VPIEnumMapping.h" -#include "VPIStatusMapping.h" - -#include "cv/core/CameraModel.h" -#include "cv/core/Image.h" - -#include - -#ifdef NVBENCH_ENABLE -#include -#endif - -namespace cvcore { namespace tensor_ops { - -VPITensorStream::VPIStereoDisparityEstimatorImpl::VPIStereoDisparityEstimatorImpl() - : m_inputLeftImage(nullptr) - , m_inputRightImage(nullptr) - , m_outputImage(nullptr) - , m_tempImage(nullptr) - , m_payload(nullptr) - , m_stereoParams() -{ - std::memset(reinterpret_cast(&m_inputLeftImageData), 0, sizeof(VPIImageData)); - std::memset(reinterpret_cast(&m_inputRightImageData), 0, sizeof(VPIImageData)); - std::memset(reinterpret_cast(&m_outputImageData), 0, sizeof(VPIImageData)); - // Disparity values returned from VPI are in Q10.5 format, i.e., signed fixed point with 5 fractional bits. Divide it by 32.0f to convert it to floating point. 
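// ---------------------------------------------------------------------------
// [Illustrative note added for clarity; not part of the original patch.]
// Worked example of the Q10.5 conversion described in the comment above: with
// 5 fractional bits, a raw S16 disparity of 417 encodes 417 / 32 = 13.03125
// pixels. The m_cvtParams.scale of 1.0f / 32 set just below makes the image
// format conversion apply exactly this division when producing the F32 output:
//
//     int16_t rawDisparity = 417;                    // Q10.5 fixed point
//     float disparityPixels = rawDisparity / 32.0f;  // 13.03125 pixels
// ---------------------------------------------------------------------------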
- vpiInitConvertImageFormatParams(&m_cvtParams); - m_cvtParams.scale = 1.0f / 32; -} - -template -std::error_code VPITensorStream::VPIStereoDisparityEstimatorImpl::initialize(Image &outImage, - const Image &leftImage, - const Image &rightImage, - VPIBackend backend) -{ - std::error_code status; - const std::error_code success = make_error_code(VPI_SUCCESS); - status = CreateVPIImageWrapper(m_inputLeftImage, m_inputLeftImageData, leftImage, backend); - if (status == success) - { - status = CreateVPIImageWrapper(m_inputRightImage, m_inputRightImageData, rightImage, backend); - } - if (status == success) - { - status = CreateVPIImageWrapper(m_outputImage, m_outputImageData, outImage, backend); - } - if (status == success) - { - status = make_error_code( - vpiImageCreate(outImage.getWidth(), outImage.getHeight(), VPI_IMAGE_FORMAT_S16, 0, &m_tempImage)); - } - if (status == success) - { - status = make_error_code(vpiCreateStereoDisparityEstimator(backend, outImage.getWidth(), outImage.getHeight(), - ToVpiImageFormat(T_IN), NULL, &m_payload)); - } - return status; -} - -template std::error_code VPITensorStream::VPIStereoDisparityEstimatorImpl::initialize(Image &outImage, - const Image &leftImage, - const Image &rightImage, - VPIBackend backend); -template std::error_code VPITensorStream::VPIStereoDisparityEstimatorImpl::initialize(Image &outImage, - const Image &leftImage, - const Image &rightImage, - VPIBackend backend); -template std::error_code VPITensorStream::VPIStereoDisparityEstimatorImpl::initialize(Image &outImage, - const Image &leftImage, - const Image &rightImage, - VPIBackend backend); - -// ----------------------------------------------------------------------------- - -template -std::error_code VPITensorStream::VPIStereoDisparityEstimatorImpl::execute(Image &outImage, - const Image &leftImage, - const Image &rightImage, - size_t windowSize, size_t maxDisparity, - VPIStream &stream, VPIBackend backend) -{ - std::error_code status = make_error_code(VPI_SUCCESS); - m_stereoParams.windowSize = static_cast(windowSize); - m_stereoParams.maxDisparity = static_cast(maxDisparity); - - bool paramsChanged = m_inputLeftImage == nullptr || m_inputRightImage == nullptr || m_outputImage == nullptr || - CheckParamsChanged(m_inputLeftImageData, leftImage) || - CheckParamsChanged(m_inputRightImageData, rightImage) || - CheckParamsChanged(m_outputImageData, outImage); - - if (paramsChanged) - { - if (m_payload != nullptr) - { - vpiPayloadDestroy(m_payload); - } - if (m_tempImage != nullptr) - { - vpiImageDestroy(m_tempImage); - } - DestroyVPIImageWrapper(m_inputLeftImage, m_inputLeftImageData); - DestroyVPIImageWrapper(m_inputRightImage, m_inputRightImageData); - DestroyVPIImageWrapper(m_outputImage, m_outputImageData); - - status = initialize(outImage, leftImage, rightImage, backend); - } - - if (status == make_error_code(VPI_SUCCESS)) - { - status = UpdateImage(m_inputLeftImage, m_inputLeftImageData, leftImage); - } - - if (status == make_error_code(VPI_SUCCESS)) - { - status = UpdateImage(m_inputRightImage, m_inputRightImageData, rightImage); - } - - if (status == make_error_code(VPI_SUCCESS)) - { - status = UpdateImage(m_outputImage, m_outputImageData, outImage); - } - - if (status == make_error_code(VPI_SUCCESS)) - { -#ifdef NVBENCH_ENABLE - std::string tag = "VPISubmitStereoDisparityEstimator_" + GetMemoryTypeAsString(leftImage.isCPU()) + "Input_" + - GetMemoryTypeAsString(outImage.isCPU()) + "Output_" + getVPIBackendString(backend) + - "Backend"; - nv::bench::Timer timerFunc = 
nv::bench::VPI(tag.c_str(), nv::bench::Flag::DEFAULT, stream); -#endif - // Submit SGM task for Stereo Disparity Estimator - status = make_error_code(vpiSubmitStereoDisparityEstimator( - stream, backend, m_payload, m_inputLeftImage, m_inputRightImage, m_tempImage, NULL, &m_stereoParams)); - } - - if (status == make_error_code(VPI_SUCCESS)) - { -#ifdef NVBENCH_ENABLE - std::string tag = "VPISubmitConvertImageFormat_" + GetMemoryTypeAsString(leftImage.isCPU()) + "Input_" + - GetMemoryTypeAsString(outImage.isCPU()) + "Output_" + getVPIBackendString(backend) + - "Backend"; - nv::bench::Timer timerFunc = nv::bench::VPI(tag.c_str(), nv::bench::Flag::DEFAULT, stream); -#endif - // Submit SGM task for Stereo Disparity Estimator - status = - make_error_code(vpiSubmitConvertImageFormat(stream, backend, m_tempImage, m_outputImage, &m_cvtParams)); - } - - if (status == make_error_code(VPI_SUCCESS)) - { - // Wait for stereo disparity estimator to complete - status = make_error_code(vpiStreamSync(stream)); - } - - if (status != make_error_code(VPI_SUCCESS)) - { - return status; - } - return make_error_code(ErrorCode::SUCCESS); -} - -template std::error_code VPITensorStream::VPIStereoDisparityEstimatorImpl::execute( - Image &outImage, const Image &leftImage, const Image &rightImage, size_t windowSize, - size_t maxDisparity, VPIStream &stream, VPIBackend backend); -template std::error_code VPITensorStream::VPIStereoDisparityEstimatorImpl::execute( - Image &outImage, const Image &leftImage, const Image &rightImage, size_t windowSize, - size_t maxDisparity, VPIStream &stream, VPIBackend backend); -template std::error_code VPITensorStream::VPIStereoDisparityEstimatorImpl::execute( - Image &outImage, const Image &leftImage, const Image &rightImage, size_t windowSize, - size_t maxDisparity, VPIStream &stream, VPIBackend backend); -// ----------------------------------------------------------------------------- - -VPITensorStream::VPIStereoDisparityEstimatorImpl::~VPIStereoDisparityEstimatorImpl() -{ - if (m_payload != nullptr) - { - vpiPayloadDestroy(m_payload); - } - if (m_tempImage != nullptr) - { - vpiImageDestroy(m_tempImage); - } - DestroyVPIImageWrapper(m_inputLeftImage, m_inputLeftImageData); - DestroyVPIImageWrapper(m_inputRightImage, m_inputRightImageData); - DestroyVPIImageWrapper(m_outputImage, m_outputImageData); -} -// ----------------------------------------------------------------------------- - -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStereoDisparityEstimatorImpl.h b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStereoDisparityEstimatorImpl.h deleted file mode 100644 index 53a5c63..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIStereoDisparityEstimatorImpl.h +++ /dev/null @@ -1,83 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_VPISTEREODISPARITYESTIMATORIMPL_H -#define CVCORE_VPISTEREODISPARITYESTIMATORIMPL_H - -#include -#include -#include - -#include "VPITensorOperators.h" - -namespace cvcore { namespace tensor_ops { -/** - * StereoDisparityEstimator implementation used for stereo dispaity estimate. - */ -class VPITensorStream::VPIStereoDisparityEstimatorImpl -{ -public: - /* VPIStereoDisparityEstimatorImpl constructor */ - VPIStereoDisparityEstimatorImpl(); - - /** - * StereoDisparityEstimator Intialization. - * @param outputImage StereoDisparityEstimator output image - * @param leftImage StereoDisparityEstimator input left image - * @param rightImage StereoDisparityEstimator input right image - * @param backend VPI backend used for StereoDisparityEstimator - * @return Success if intialization is done successfully, otherwise error is returned - */ - template - std::error_code initialize(Image &outImage, const Image &leftImage, const Image &rightImage, - VPIBackend backend); - - /** - * StereoDisparityEstimator execution function(non-blocking) - * Application is reqiured to call Sync() before accessing the generated Image. - * @param outImage StereoDisparityEstimator output image - * @param leftImage StereoDisparityEstimator input left image - * @param rightImage StereoDisparityEstimator input right image - * @param windowSize Represents the median filter size (on PVA+NVENC_VIC backend) or census transform window size (other backends) used in the algorithm - * @param maxDisparity Maximum disparity for matching search - * @param stream VPI stream used for StereoDisparityEstimator - * @param backend VPI backend used for StereoDisparityEstimator - * @return Success if StereoDisparityEstimator is submitted successfully, otherwise error is returned - */ - template - std::error_code execute(Image &outImage, const Image &leftImage, const Image &rightImage, - size_t windowSize, size_t maxDisparity, VPIStream &stream, VPIBackend backend); - - /* VPIStereoDisparityEstimatorImpl destructor to release resources */ - ~VPIStereoDisparityEstimatorImpl(); - -private: - VPIImage m_inputLeftImage; - VPIImage m_inputRightImage; - VPIImage m_outputImage; - VPIImage m_tempImage; - VPIImageData m_inputLeftImageData; - VPIImageData m_inputRightImageData; - VPIImageData m_outputImageData; - VPIPayload m_payload; - VPIStereoDisparityEstimatorParams m_stereoParams; - VPIConvertImageFormatParams m_cvtParams; -}; - -}} // namespace cvcore::tensor_ops - -#endif //CVCORE_VPISTEREODISPARITYESTIMATORIMPL_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPITensorOperators.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPITensorOperators.cpp deleted file mode 100644 index 48149ef..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPITensorOperators.cpp +++ /dev/null @@ -1,710 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "cv/core/CameraModel.h" -#include "cv/core/Image.h" - -#include "VPIColorConvertImpl.h" -#include "VPIEnumMapping.h" -#include "VPIImageWarp.h" -#include "VPIRemapImpl.h" -#include "VPIResizeImpl.h" -#include "VPIStatusMapping.h" -#include "VPIStereoDisparityEstimatorImpl.h" -#include "VPITensorOperators.h" - -#ifdef NVBENCH_ENABLE -#include -#endif - -namespace cvcore { namespace tensor_ops { - -namespace detail { - -// helper function to wrap VPI image for NV12 / NV24 image types -template::value>::type * = nullptr> -std::error_code CreateVPIImageWrapperImpl(VPIImage &vpiImg, VPIImageData &imgdata, const Image &cvcoreImage, VPIBackend backend) -{ -#ifdef NVBENCH_ENABLE - std::string tag = "CreateVPIImageWrapper_NV12/NV24"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - std::memset(reinterpret_cast(&imgdata), 0, sizeof(VPIImageData)); - - imgdata.bufferType = cvcoreImage.isCPU() ? VPI_IMAGE_BUFFER_HOST_PITCH_LINEAR : VPI_IMAGE_BUFFER_CUDA_PITCH_LINEAR; - imgdata.buffer.pitch.format = ToVpiImageFormat(T); - imgdata.buffer.pitch.numPlanes = 2; - imgdata.buffer.pitch.planes[0].data = const_cast(cvcoreImage.getLumaData()); - imgdata.buffer.pitch.planes[0].height = cvcoreImage.getLumaHeight(); - imgdata.buffer.pitch.planes[0].width = cvcoreImage.getLumaWidth(); - imgdata.buffer.pitch.planes[0].pixelType = VPI_PIXEL_TYPE_U8; - imgdata.buffer.pitch.planes[0].pitchBytes = cvcoreImage.getLumaStride(TensorDimension::HEIGHT) * sizeof(uint8_t); - imgdata.buffer.pitch.planes[1].data = const_cast(cvcoreImage.getChromaData()); - imgdata.buffer.pitch.planes[1].height = cvcoreImage.getChromaHeight(); - imgdata.buffer.pitch.planes[1].width = cvcoreImage.getChromaWidth(); - imgdata.buffer.pitch.planes[1].pixelType = VPI_PIXEL_TYPE_2U8; - imgdata.buffer.pitch.planes[1].pitchBytes = cvcoreImage.getChromaStride(TensorDimension::HEIGHT) * sizeof(uint8_t); - VPIStatus vpiStatus; - vpiStatus = vpiImageCreateWrapper(&imgdata, nullptr, backend, &vpiImg); - - return make_error_code(vpiStatus); -} - -// helper function to wrap VPI image for interleaved image types -template::value>::type * = nullptr> -std::error_code CreateVPIImageWrapperImpl(VPIImage &vpiImg, VPIImageData &imgdata, const Image &cvcoreImage, VPIBackend backend) -{ -#ifdef NVBENCH_ENABLE - std::string tag = "CreateVPIImageWrapper_Interleaved"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - std::memset(reinterpret_cast(&imgdata), 0, sizeof(VPIImageData)); - - using D = typename Image::DataType; - imgdata.bufferType = cvcoreImage.isCPU() ? 
VPI_IMAGE_BUFFER_HOST_PITCH_LINEAR : VPI_IMAGE_BUFFER_CUDA_PITCH_LINEAR; - imgdata.buffer.pitch.format = ToVpiImageFormat(T); - imgdata.buffer.pitch.numPlanes = 1; - imgdata.buffer.pitch.planes[0].data = const_cast(cvcoreImage.getData()); - imgdata.buffer.pitch.planes[0].height = cvcoreImage.getHeight(); - imgdata.buffer.pitch.planes[0].width = cvcoreImage.getWidth(); - imgdata.buffer.pitch.planes[0].pixelType = ToVpiPixelType(T); - imgdata.buffer.pitch.planes[0].pitchBytes = cvcoreImage.getStride(TensorDimension::HEIGHT) * GetImageElementSize(T); - VPIStatus vpiStatus; - vpiStatus = vpiImageCreateWrapper(&imgdata, nullptr, backend, &vpiImg); - - return make_error_code(vpiStatus); -} - -// helper function to wrap VPI image for planar image types (TODO: not supported in vpi so far) -template::value>::type * = nullptr> -std::error_code CreateVPIImageWrapperImpl(VPIImage &vpiImg, VPIImageData &imgdata, const Image &cvcoreImage, VPIBackend backend) -{ - return make_error_code(VPI_ERROR_INVALID_IMAGE_FORMAT); -} - -} // namespace detail - -std::error_code VPITensorContext::CreateStream(TensorOperatorStream &tensorStream, const ComputeEngine &computeEngine) -{ - tensorStream = nullptr; - - if (!IsComputeEngineCompatible(computeEngine)) - { - return ErrorCode::INVALID_ENGINE_TYPE; - } - - try - { - tensorStream = new VPITensorStream(computeEngine); - } - catch (std::error_code &e) - { - return e; - } - catch (...) - { - return ErrorCode::INVALID_OPERATION; - } - - return ErrorCode::SUCCESS; -} - -VPITensorStream::VPITensorStream(const ComputeEngine &computeEngine) - : m_resizer(new VPIResizeImpl()) - , m_remapper(new VPIRemapImpl()) - , m_colorConverter(new VPIColorConvertImpl()) - , m_stereoDisparityEstimator(new VPIStereoDisparityEstimatorImpl()) -{ - VPIBackend backend = ToVpiBackendType(computeEngine); - VPIStatus status = vpiStreamCreate(backend, &m_stream); - if (status != VPI_SUCCESS) - { - throw make_error_code(status); - } - m_backend = backend; -} - -VPITensorStream::~VPITensorStream() -{ - vpiStreamDestroy(m_stream); -} - -std::error_code VPITensorContext::DestroyStream(TensorOperatorStream &inputStream) -{ - if (inputStream != nullptr) - { - delete inputStream; - inputStream = nullptr; - } - return ErrorCode::SUCCESS; -} - -bool VPITensorContext::IsComputeEngineCompatible(const ComputeEngine &computeEngine) const noexcept -{ - VPIBackend vpibackend = ToVpiBackendType(computeEngine); - if (vpibackend == VPIBackend::VPI_BACKEND_INVALID) - { - return false; - } - return true; -} - -template -std::error_code CreateVPIImageWrapper(VPIImage &vpiImg, VPIImageData &imgdata, const Image &cvcoreImage, VPIBackend backend) -{ - return detail::CreateVPIImageWrapperImpl(vpiImg, imgdata, cvcoreImage, backend); -} - -template std::error_code CreateVPIImageWrapper(VPIImage &, VPIImageData &, const Image &, VPIBackend); -template std::error_code CreateVPIImageWrapper(VPIImage &, VPIImageData &, const Image &, VPIBackend); -template std::error_code CreateVPIImageWrapper(VPIImage &, VPIImageData &, const Image &, VPIBackend); -template std::error_code CreateVPIImageWrapper(VPIImage &, VPIImageData &, const Image &, VPIBackend); -template std::error_code CreateVPIImageWrapper(VPIImage &, VPIImageData &, const Image &, VPIBackend); -template std::error_code CreateVPIImageWrapper(VPIImage &, VPIImageData &, const Image &, VPIBackend); -template std::error_code CreateVPIImageWrapper(VPIImage &, VPIImageData &, const Image &, VPIBackend); -template std::error_code CreateVPIImageWrapper(VPIImage &, 
VPIImageData &, const Image &, VPIBackend); -template std::error_code CreateVPIImageWrapper(VPIImage &, VPIImageData &, const Image &, VPIBackend); -template std::error_code CreateVPIImageWrapper(VPIImage &, VPIImageData &, const Image &, VPIBackend); - -std::error_code UpdateVPIImageWrapper(VPIImage &image, VPIImageData &imageWrap, bool isCPU) -{ -#ifdef NVBENCH_ENABLE - std::string tag = "UpdateVPIImageWrapper"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - VPIStatus status = VPI_SUCCESS; - status = vpiImageSetWrapper(image, &imageWrap); - - return make_error_code(status); -} - -std::error_code DestroyVPIImageWrapper(VPIImage &image, VPIImageData &imageWrap) -{ -#ifdef NVBENCH_ENABLE - std::string tag = "DestroyVPIImageWrapper"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - std::memset(reinterpret_cast(&imageWrap), 0, sizeof(VPIImageData)); - if (image != nullptr) - { - vpiImageDestroy(image); - } - - image = nullptr; - - return ErrorCode::SUCCESS; -} -std::error_code VPITensorStream::Status() noexcept -{ - // TODO Needs to be updated for supporting non blocking function calls. - return ErrorCode::SUCCESS; -} - -std::error_code VPITensorStream::Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation, BorderType border) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIResize_RGB_U8_" + std::to_string(inputImage.getWidth()) + "X" + - std::to_string(inputImage.getHeight()) + "_" + std::to_string(outputImage.getWidth()) + "X" + - std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - std::error_code err_code; - err_code = m_resizer->execute(outputImage, inputImage, interpolation, border, m_stream, m_backend); - return err_code; -} - -std::error_code VPITensorStream::Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation, BorderType border) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIResize_NV12_" + std::to_string(inputImage.getLumaWidth()) + "X" + - std::to_string(inputImage.getLumaHeight()) + "_" + std::to_string(outputImage.getLumaWidth()) + - "X" + std::to_string(outputImage.getLumaHeight()) + "_" + getVPIBackendString(m_backend) + - "Backend_" + GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - ; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - std::error_code err_code; - err_code = m_resizer->execute(outputImage, inputImage, interpolation, border, m_stream, m_backend); - return err_code; -} - -std::error_code VPITensorStream::Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation, BorderType border) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIResize_RGBA_U8_" + std::to_string(inputImage.getWidth()) + "X" + - std::to_string(inputImage.getHeight()) + "_" + std::to_string(outputImage.getWidth()) + "X" + - std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - std::error_code err_code; - err_code = m_resizer->execute(outputImage, 
inputImage, interpolation, border, m_stream, m_backend); - return err_code; -} - -std::error_code VPITensorStream::Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation, BorderType border) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIResize_BGR_U8_" + std::to_string(inputImage.getWidth()) + "X" + - std::to_string(inputImage.getHeight()) + "_" + std::to_string(outputImage.getWidth()) + "X" + - std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - std::error_code err_code; - err_code = m_resizer->execute(outputImage, inputImage, interpolation, border, m_stream, m_backend); - return err_code; -} - -std::error_code VPITensorStream::Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation, BorderType border) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIResize_NV24_" + std::to_string(inputImage.getLumaWidth()) + "X" + - std::to_string(inputImage.getLumaHeight()) + "_" + std::to_string(outputImage.getLumaWidth()) + - "X" + std::to_string(outputImage.getLumaHeight()) + "_" + getVPIBackendString(m_backend) + - "Backend_" + GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - std::error_code err_code; - err_code = m_resizer->execute(outputImage, inputImage, interpolation, border, m_stream, m_backend); - return err_code; -} - -std::error_code VPITensorStream::Remap(Image &outputImage, const Image &inputImage, - const ImageWarp warp, InterpolationType interpolation, BorderType border) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIRemap_RGB_U8_" + std::to_string(inputImage.getWidth()) + "X" + - std::to_string(inputImage.getHeight()) + "_" + std::to_string(outputImage.getWidth()) + "X" + - std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_remapper->execute(outputImage, inputImage, reinterpret_cast(warp), interpolation, border, - m_stream, m_backend); -} - -std::error_code VPITensorStream::Remap(Image &outputImage, const Image &inputImage, - const ImageWarp warp, InterpolationType interpolation, BorderType border) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIRemap_BGR_U8_" + std::to_string(inputImage.getWidth()) + "X" + - std::to_string(inputImage.getHeight()) + "_" + std::to_string(outputImage.getWidth()) + "X" + - std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_remapper->execute(outputImage, inputImage, reinterpret_cast(warp), interpolation, border, - m_stream, m_backend); -} - -std::error_code VPITensorStream::Remap(Image &outputImage, const Image &inputImage, const ImageWarp warp, - InterpolationType interpolation, BorderType border) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIRemap_NV12_" + 
std::to_string(inputImage.getLumaWidth()) + "X" + - std::to_string(inputImage.getLumaHeight()) + "_" + std::to_string(outputImage.getLumaWidth()) + - "X" + std::to_string(outputImage.getLumaHeight()) + "_" + getVPIBackendString(m_backend) + - "Backend_" + GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_remapper->execute(outputImage, inputImage, reinterpret_cast(warp), interpolation, border, - m_stream, m_backend); -} - -std::error_code VPITensorStream::Remap(Image &outputImage, const Image &inputImage, const ImageWarp warp, - InterpolationType interpolation, BorderType border) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIRemap_NV24_" + std::to_string(inputImage.getLumaWidth()) + "X" + - std::to_string(inputImage.getLumaHeight()) + "_" + std::to_string(outputImage.getLumaWidth()) + - "X" + std::to_string(outputImage.getLumaHeight()) + "_" + getVPIBackendString(m_backend) + - "Backend_" + GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_remapper->execute(outputImage, inputImage, reinterpret_cast(warp), interpolation, border, - m_stream, m_backend); -} - -std::error_code VPITensorStream::ColorConvert(Image &outputImage, const Image &inputImage) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIColorConvert_BGR_RGB_" + std::to_string(inputImage.getWidth()) + "X" + - std::to_string(inputImage.getHeight()) + "_" + std::to_string(outputImage.getWidth()) + "X" + - std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_colorConverter->execute(outputImage, inputImage, m_stream, m_backend); -} - -std::error_code VPITensorStream::ColorConvert(Image &outputImage, const Image &inputImage) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIColorConvert_RGB_BGR_" + std::to_string(inputImage.getWidth()) + "X" + - std::to_string(inputImage.getHeight()) + "_" + std::to_string(outputImage.getWidth()) + "X" + - std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_colorConverter->execute(outputImage, inputImage, m_stream, m_backend); -} - -std::error_code VPITensorStream::ColorConvert(Image &outputImage, const Image &inputImage) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIColorConvert_NV12_BGR_" + std::to_string(inputImage.getWidth()) + "X" + - std::to_string(inputImage.getHeight()) + "_" + std::to_string(outputImage.getLumaWidth()) + "X" + - std::to_string(outputImage.getLumaHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_colorConverter->execute(outputImage, inputImage, m_stream, m_backend); -} - -std::error_code VPITensorStream::ColorConvert(Image &outputImage, const Image &inputImage) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - 
std::string tag = "VPIColorConvert_NV24_BGR_" + std::to_string(inputImage.getWidth()) + "X" + - std::to_string(inputImage.getHeight()) + "_" + std::to_string(outputImage.getLumaWidth()) + "X" + - std::to_string(outputImage.getLumaHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_colorConverter->execute(outputImage, inputImage, m_stream, m_backend); -} - -std::error_code VPITensorStream::ColorConvert(Image &outputImage, const Image &inputImage) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIColorConvert_BGR_NV12_" + std::to_string(inputImage.getLumaWidth()) + "X" + - std::to_string(inputImage.getLumaHeight()) + "_" + std::to_string(outputImage.getWidth()) + "X" + - std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_colorConverter->execute(outputImage, inputImage, m_stream, m_backend); -} - -std::error_code VPITensorStream::ColorConvert(Image &outputImage, const Image &inputImage) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIColorConvert_BGR_NV24_" + std::to_string(inputImage.getLumaWidth()) + "X" + - std::to_string(inputImage.getLumaHeight()) + "_" + std::to_string(outputImage.getWidth()) + "X" + - std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_colorConverter->execute(outputImage, inputImage, m_stream, m_backend); -} - -std::error_code VPITensorStream::ColorConvert(Image &outputImage, const Image &inputImage) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIColorConvert_NV12_RGB_" + std::to_string(inputImage.getWidth()) + "X" + - std::to_string(inputImage.getHeight()) + "_" + std::to_string(outputImage.getLumaWidth()) + "X" + - std::to_string(outputImage.getLumaHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_colorConverter->execute(outputImage, inputImage, m_stream, m_backend); -} - -std::error_code VPITensorStream::ColorConvert(Image &outputImage, const Image &inputImage) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIColorConvert_NV24_RGB_" + std::to_string(inputImage.getWidth()) + "X" + - std::to_string(inputImage.getHeight()) + "_" + std::to_string(outputImage.getLumaWidth()) + "X" + - std::to_string(outputImage.getLumaHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_colorConverter->execute(outputImage, inputImage, m_stream, m_backend); -} - -std::error_code VPITensorStream::ColorConvert(Image &outputImage, const Image &inputImage) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIColorConvert_RGB_NV12_" + std::to_string(inputImage.getLumaWidth()) + 
"X" + - std::to_string(inputImage.getLumaHeight()) + "_" + std::to_string(outputImage.getWidth()) + "X" + - std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_colorConverter->execute(outputImage, inputImage, m_stream, m_backend); -} - -std::error_code VPITensorStream::ColorConvert(Image &outputImage, const Image &inputImage) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "VPIColorConvert_RGB_NV24_" + std::to_string(inputImage.getLumaWidth()) + "X" + - std::to_string(inputImage.getLumaHeight()) + "_" + std::to_string(outputImage.getWidth()) + "X" + - std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - return m_colorConverter->execute(outputImage, inputImage, m_stream, m_backend); -} - -std::error_code VPITensorStream::StereoDisparityEstimator(Image &outputImage, const Image &inputLeftImage, - const Image &inputRightImage, size_t windowSize, - size_t maxDisparity) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "StereoDisparityEstimator_Y_F32_Y_U8_" + std::to_string(inputLeftImage.getWidth()) + "X" + - std::to_string(inputLeftImage.getHeight()) + "_" + std::to_string(outputImage.getWidth()) + "X" + - std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + "Backend_" + - GetMemoryTypeAsString(inputLeftImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - return m_stereoDisparityEstimator->execute(outputImage, inputLeftImage, inputRightImage, windowSize, maxDisparity, - m_stream, m_backend); -} - -std::error_code VPITensorStream::StereoDisparityEstimator(Image &outputImage, const Image &inputLeftImage, - const Image &inputRightImage, size_t windowSize, - size_t maxDisparity) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "StereoDisparityEstimator_Y_F32_NV12_" + std::to_string(inputLeftImage.getLumaWidth()) + "X" + - std::to_string(inputLeftImage.getLumaHeight()) + "_" + std::to_string(outputImage.getWidth()) + - "X" + std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + - "Backend_" + GetMemoryTypeAsString(inputLeftImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - return m_stereoDisparityEstimator->execute(outputImage, inputLeftImage, inputRightImage, windowSize, maxDisparity, - m_stream, m_backend); -} - -std::error_code VPITensorStream::StereoDisparityEstimator(Image &outputImage, const Image &inputLeftImage, - const Image &inputRightImage, size_t windowSize, - size_t maxDisparity) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "StereoDisparityEstimator_Y_F32_NV24_" + std::to_string(inputLeftImage.getLumaWidth()) + "X" + - std::to_string(inputLeftImage.getLumaHeight()) + "_" + std::to_string(outputImage.getWidth()) + - "X" + std::to_string(outputImage.getHeight()) + "_" + getVPIBackendString(m_backend) + - "Backend_" + GetMemoryTypeAsString(inputLeftImage.isCPU()) + "Input"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), 
nv::bench::Flag::DEFAULT); -#endif - return m_stereoDisparityEstimator->execute(outputImage, inputLeftImage, inputRightImage, windowSize, maxDisparity, - m_stream, m_backend); -} - -TensorBackend VPITensorContext::Backend() const noexcept -{ - return TensorBackend::VPI; -} - -std::error_code VPITensorStream::GenerateWarpFromCameraModel(ImageWarp &warp, const ImageGrid &grid, - const CameraModel &source, const CameraIntrinsics &target) -{ - std::unique_lock scopedLock{m_fence}; -#ifdef NVBENCH_ENABLE - std::string tag = "GenerateWarpFromCameraModel"; - nv::bench::Timer timerFunc = nv::bench::CPU(tag.c_str(), nv::bench::Flag::DEFAULT); -#endif - - VPIStatus status = VPI_SUCCESS; - - VPIWarpMap map = {0}; - map.grid.numHorizRegions = grid.numHorizRegions; - for (std::size_t i = 0; i < static_cast(grid.numHorizRegions); i++) - { - map.grid.regionWidth[i] = grid.regionWidth[i]; - map.grid.horizInterval[i] = grid.horizInterval[i]; - } - map.grid.numVertRegions = grid.numVertRegions; - for (std::size_t i = 0; i < static_cast(grid.numVertRegions); i++) - { - map.grid.regionHeight[i] = grid.regionHeight[i]; - map.grid.vertInterval[i] = grid.vertInterval[i]; - } - status = vpiWarpMapAllocData(&map); - - if ((status == VPI_SUCCESS) && (map.keypoints)) - { - switch (source.distortion.type) - { - case CameraDistortionType::Polynomial: - { - VPIPolynomialLensDistortionModel distortion; - distortion.k1 = source.distortion.k1; - distortion.k2 = source.distortion.k2; - distortion.k3 = source.distortion.k3; - distortion.k4 = source.distortion.k4; - distortion.k5 = source.distortion.k5; - distortion.k6 = source.distortion.k6; - distortion.p1 = source.distortion.p1; - distortion.p2 = source.distortion.p2; - status = vpiWarpMapGenerateFromPolynomialLensDistortionModel( - source.intrinsic.m_intrinsics, source.extrinsic.m_extrinsics, target.m_intrinsics, &distortion, &map); - break; - } - case CameraDistortionType::FisheyeEquidistant: - { - VPIFisheyeLensDistortionModel distortion; - distortion.k1 = source.distortion.k1; - distortion.k2 = source.distortion.k2; - distortion.k3 = source.distortion.k3; - distortion.k4 = source.distortion.k4; - distortion.mapping = VPI_FISHEYE_EQUIDISTANT; - status = vpiWarpMapGenerateFromFisheyeLensDistortionModel( - source.intrinsic.m_intrinsics, source.extrinsic.m_extrinsics, target.m_intrinsics, &distortion, &map); - break; - } - case CameraDistortionType::FisheyeEquisolid: - { - VPIFisheyeLensDistortionModel distortion; - distortion.k1 = source.distortion.k1; - distortion.k2 = source.distortion.k2; - distortion.k3 = source.distortion.k3; - distortion.k4 = source.distortion.k4; - distortion.mapping = VPI_FISHEYE_EQUISOLID; - status = vpiWarpMapGenerateFromFisheyeLensDistortionModel( - source.intrinsic.m_intrinsics, source.extrinsic.m_extrinsics, target.m_intrinsics, &distortion, &map); - break; - } - case CameraDistortionType::FisheyeOrthoGraphic: - { - VPIFisheyeLensDistortionModel distortion; - distortion.k1 = source.distortion.k1; - distortion.k2 = source.distortion.k2; - distortion.k3 = source.distortion.k3; - distortion.k4 = source.distortion.k4; - distortion.mapping = VPI_FISHEYE_ORTHOGRAPHIC; - status = vpiWarpMapGenerateFromFisheyeLensDistortionModel( - source.intrinsic.m_intrinsics, source.extrinsic.m_extrinsics, target.m_intrinsics, &distortion, &map); - break; - } - case CameraDistortionType::FisheyeStereographic: - { - VPIFisheyeLensDistortionModel distortion; - distortion.k1 = source.distortion.k1; - distortion.k2 = source.distortion.k2; - distortion.k3 = 
source.distortion.k3; - distortion.k4 = source.distortion.k4; - distortion.mapping = VPI_FISHEYE_STEREOGRAPHIC; - status = vpiWarpMapGenerateFromFisheyeLensDistortionModel( - source.intrinsic.m_intrinsics, source.extrinsic.m_extrinsics, target.m_intrinsics, &distortion, &map); - break; - } - default: - status = VPI_ERROR_INVALID_ARGUMENT; - break; - } - } - - if ((status == VPI_SUCCESS) && (map.keypoints)) - { - if (warp != nullptr) - { - vpiPayloadDestroy(reinterpret_cast(warp)->payload); - delete warp; - } - warp = new VPIImageWarp; - status = vpiCreateRemap(m_backend, &map, &(reinterpret_cast(warp)->payload)); - } - - // Delete map after payload is generated - vpiWarpMapFreeData(&map); - - return make_error_code(status); -} - -std::error_code VPITensorStream::DestroyWarp(ImageWarp &warp) noexcept -{ - std::unique_lock scopedLock{m_fence}; - if (warp != nullptr) - { - try - { - vpiPayloadDestroy(reinterpret_cast(warp)->payload); - } - catch (std::error_code &e) - { - return e; - } - - delete reinterpret_cast(warp); - warp = nullptr; - } - return make_error_code(ErrorCode::SUCCESS); -} - -}} // namespace cvcore::tensor_ops diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPITensorOperators.h b/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPITensorOperators.h deleted file mode 100644 index e01ed11..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPITensorOperators.h +++ /dev/null @@ -1,272 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#ifndef CVCORE_VPITENSOROPERATORS_H -#define CVCORE_VPITENSOROPERATORS_H - -#include -#include - -#include -#include -#include -#include - -#include "cv/core/CameraModel.h" -#include "cv/tensor_ops/ITensorOperatorContext.h" -#include "cv/tensor_ops/ITensorOperatorStream.h" - -#include "VPIEnumMapping.h" - -namespace cvcore { namespace tensor_ops { - -/** - * Returns the corresponding VPI backend given the cvcore compute engine. - * @param computeEngine Compute engine used. - * @return Returns the VPIbackend. 
- */ -VPIBackend getVPIBackend(const ComputeEngine &computeEngine); - -/** - * Wraps a CVCore Image type into a VPIImage - * @param vpiImage VPIImage - * @param imgdata VPIImage data - * @param cvcoreImage CVCore image - * @param backend Compute backend - * @return error code - */ -template -std::error_code CreateVPIImageWrapper(VPIImage &vpiImg, VPIImageData &imgdata, const Image &cvcoreImage, VPIBackend backend); - -/** - * Update VPI Image data pointer - * @param vpiImage VPIImage - * @param imgdata VPIImage data - * @param isCPU data is on CPU or GPU - * @return error code - */ -std::error_code UpdateVPIImageWrapper(VPIImage &image, VPIImageData &imageWrap, bool isCPU); - -/** - * Destory Wrapped VPI Image - * @param vpiImage VPIImage - * @param imgdata VPIImage data - * @return error code - */ -std::error_code DestroyVPIImageWrapper(VPIImage &image, VPIImageData &imageWrap); - -/** - * Update VPI Image given CVCORE Image - * @param vpiImage VPIImage - * @param vpiImageData VPIImage data - * @param image CVCORE Image - * @return error code - */ -template::value>::type * = nullptr> -std::error_code UpdateImage(VPIImage &vpiImage, VPIImageData &vpiImageData, const Image &image) -{ - using D = typename Image::DataType; - vpiImageData.buffer.pitch.planes[0].data = const_cast(image.getData()); - return UpdateVPIImageWrapper(vpiImage, vpiImageData, image.isCPU()); -} - -/** - * Update VPI Image given CVCORE Image - * @param vpiImage VPIImage - * @param vpiImageData VPIImage data - * @param image CVCORE Image - * @return error code - */ -template::value>::type * = nullptr> -std::error_code UpdateImage(VPIImage &vpiImage, VPIImageData &vpiImageData, const Image &image) -{ - vpiImageData.buffer.pitch.planes[0].data = const_cast(image.getLumaData()); - vpiImageData.buffer.pitch.planes[1].data = const_cast(image.getChromaData()); - return UpdateVPIImageWrapper(vpiImage, vpiImageData, image.isCPU()); -} - -/** - * Check if params of VPIImageData is consistent with the given CVCORE Image - * @param vpiImageData VPIImage data - * @param image CVCORE Image - * @return whether param changed - */ -template::value && !IsPlanarImage::value>::type * = nullptr> -bool CheckParamsChanged(VPIImageData &vpiImageData, const Image &image) -{ - bool paramsChanged = false; - // Did format change - paramsChanged = paramsChanged || vpiImageData.buffer.pitch.format != ToVpiImageFormat(T); - // Did image dimension change - paramsChanged = - paramsChanged || (vpiImageData.buffer.pitch.planes[0].height != static_cast(image.getHeight()) || - vpiImageData.buffer.pitch.planes[0].width != static_cast(image.getWidth())); - return paramsChanged; -} - -/** - * Check if params of VPIImageData is consistent with the given CVCORE Image - * @param vpiImageData VPIImage data - * @param image CVCORE Image - * @return whether param changed - */ -template::value>::type * = nullptr> -bool CheckParamsChanged(VPIImageData &vpiImageData, const Image &image) -{ - bool paramsChanged = false; - - // Did format change - paramsChanged = paramsChanged || vpiImageData.buffer.pitch.format != ToVpiImageFormat(T); - - // Did image dimension change - paramsChanged = paramsChanged || - (vpiImageData.buffer.pitch.planes[0].height != static_cast(image.getLumaHeight()) || - vpiImageData.buffer.pitch.planes[0].width != static_cast(image.getLumaWidth()) || - vpiImageData.buffer.pitch.planes[1].height != static_cast(image.getChromaHeight()) || - vpiImageData.buffer.pitch.planes[1].width != static_cast(image.getChromaWidth())); - return paramsChanged; -} - 
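
The wrapper helpers declared above (CreateVPIImageWrapper, UpdateVPIImageWrapper/UpdateImage, DestroyVPIImageWrapper) form a wrap/refresh/release lifecycle for sharing a cvcore image with VPI. The sketch below illustrates that lifecycle only; it is not part of this patch. The function ProcessFrame, the RGB_U8 image type, the CUDA backend choice, and the minimal error handling are assumptions made for illustration.

    // Minimal usage sketch of the VPI image-wrapping helpers declared above.
    // Assumptions (not from this patch): RGB_U8 cvcore image, CUDA backend.
    #include <system_error>
    #include <vpi/Image.h>
    #include "VPITensorOperators.h"

    namespace to = cvcore::tensor_ops;

    void ProcessFrame(const cvcore::Image<cvcore::ImageType::RGB_U8>& frame)
    {
        VPIImage vpiImage = nullptr;
        VPIImageData vpiData{};

        // Wrap the cvcore image into a VPIImage once for the chosen backend.
        std::error_code err =
            to::CreateVPIImageWrapper(vpiImage, vpiData, frame, VPI_BACKEND_CUDA);
        if (err) { return; }

        // On later frames with the same geometry, refresh only the data pointer.
        err = to::UpdateImage(vpiImage, vpiData, frame);
        if (err) { /* handle error */ }

        // Release the wrapper; the underlying cvcore image is left untouched.
        to::DestroyVPIImageWrapper(vpiImage, vpiData);
    }
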
-/** - * Implementation of VPITensorContext - */ -class VPITensorContext : public ITensorOperatorContext -{ -public: - /** - * Default Constructor for VPI Context. - */ - VPITensorContext() = default; - - /** - * Default Destructor for VPI Context. - */ - ~VPITensorContext() = default; - - /** - * Creates a stream based on compute engine - * @param computeEngine CVCore Compute engine - * @return Pointer to ITensorOperatorStream object. - */ - virtual std::error_code CreateStream(TensorOperatorStream &, const ComputeEngine &computeEngine) override; - - /** - * Destroy stream creates. - * @param inputStream Input stream to be deleted - * @return Error code - */ - virtual std::error_code DestroyStream(TensorOperatorStream &inputStream) override; - - /** - * Checks if stream type is supported for a given backend. - * @param computeEngine CVCore Compute engine - * @return true if stream type is available. - */ - virtual bool IsComputeEngineCompatible(const ComputeEngine &computeEngine) const noexcept override; - - /** - * Returns the backend type - */ - virtual TensorBackend Backend() const noexcept override; - -private: -}; - -/** - * Implementation of VPITensorStream - */ -class VPITensorStream : public ITensorOperatorStream -{ -public: - virtual std::error_code Status() noexcept override; - - virtual std::error_code GenerateWarpFromCameraModel(ImageWarp &warp, const ImageGrid &grid, - const CameraModel &source, - const CameraIntrinsics &target) override; - virtual std::error_code DestroyWarp(ImageWarp &warp) noexcept override; - - virtual std::error_code Remap(Image &outputImage, const Image &inputImage, const ImageWarp warp, - InterpolationType interpolation, BorderType border) override; - - virtual std::error_code Remap(Image &outputImage, const Image &inputImage, const ImageWarp warp, - InterpolationType interpolation, BorderType border) override; - - virtual std::error_code Remap(Image &outputImage, const Image &inputImage, const ImageWarp warp, - InterpolationType interpolation, BorderType border) override; - - virtual std::error_code Remap(Image &outputImage, const Image &inputImage, const ImageWarp warp, - InterpolationType interpolation, BorderType border) override; - - virtual std::error_code Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation, BorderType border) override; - virtual std::error_code Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation, BorderType border) override; - virtual std::error_code Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation, BorderType border) override; - virtual std::error_code Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation, BorderType border) override; - virtual std::error_code Resize(Image &outputImage, const Image &inputImage, - InterpolationType interpolation, BorderType border) override; - - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) override; - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) override; - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) override; - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) override; - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) override; - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) override; - virtual std::error_code ColorConvert(Image &outputImage, const 
Image &inputImage) override; - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) override; - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) override; - virtual std::error_code ColorConvert(Image &outputImage, const Image &inputImage) override; - virtual std::error_code StereoDisparityEstimator(Image &outputImage, const Image &inputLeftImage, - const Image &inputRightImage, size_t windowSize, - size_t maxDisparity) override; - virtual std::error_code StereoDisparityEstimator(Image &outputImage, const Image &inputLeftImage, - const Image &inputRightImage, size_t windowSize, - size_t maxDisparity) override; - virtual std::error_code StereoDisparityEstimator(Image &outputImage, const Image &inputLeftImage, - const Image &inputRightImage, size_t windowSize, - size_t maxDisparity) override; - -protected: - friend class VPITensorContext; - - VPITensorStream(const ComputeEngine &computeEngine); - ~VPITensorStream(); - -private: - class VPIResizeImpl; - class VPIRemapImpl; - class VPIColorConvertImpl; - class VPIStereoDisparityEstimatorImpl; - - mutable std::mutex m_fence; - - std::unique_ptr m_resizer; - std::unique_ptr m_remapper; - std::unique_ptr m_colorConverter; - std::unique_ptr m_stereoDisparityEstimator; - - VPIStream m_stream; - VPIBackend m_backend; -}; - -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_VPITENSOROPERATORS_H diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/trtbackend/TRTBackend.cpp b/isaac_ros_ess/gxf/ess/cvcore/src/trtbackend/TRTBackend.cpp deleted file mode 100644 index aa315e5..0000000 --- a/isaac_ros_ess/gxf/ess/cvcore/src/trtbackend/TRTBackend.cpp +++ /dev/null @@ -1,634 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "cv/trtbackend/TRTBackend.h" - -#include "NvInfer.h" -#include "NvOnnxConfig.h" -#include "NvOnnxParser.h" -#include "NvUffParser.h" -#include "NvUtils.h" - -#include -#include -#include -#include - -namespace cvcore { - -namespace { - - -void WriteSerializedEngineToFile(const char *data, size_t engineSize, std::string &outputFile) -{ - std::ofstream outFile(outputFile.c_str(), std::ios::binary); - if (!outFile.is_open()) - { - throw std::runtime_error("Cannot open file to write serialized Engine. Permissions? 
"); - } - else - { - outFile.write(data, engineSize); - outFile.close(); - } -} - -nvinfer1::Dims4 GetTRTBlobDimension(int batch, int channels, int height, int width, TRTBackendBlobLayout layout) -{ - nvinfer1::Dims4 dims; - switch (layout) - { - case TRTBackendBlobLayout::PLANAR: - { - dims = {batch, channels, height, width}; - break; - } - case TRTBackendBlobLayout::INTERLEAVED: - { - dims = {batch, height, width, channels}; - break; - } - default: - { - throw std::runtime_error("Only PLANAR and INTERLEAVED types allowed"); - } - } - return dims; -} - -nvinfer1::Dims3 GetTRTBlobDimension(int channels, int height, int width, TRTBackendBlobLayout layout) -{ - nvinfer1::Dims3 dims; - switch (layout) - { - case TRTBackendBlobLayout::PLANAR: - { - dims = {channels, height, width}; - break; - } - case TRTBackendBlobLayout::INTERLEAVED: - { - dims = {height, width, channels}; - break; - } - default: - { - throw std::runtime_error("Only PLANAR and INTERLEAVED types allowed"); - } - } - return dims; -} - -bool SetupProfile(nvinfer1::IOptimizationProfile *profile, nvinfer1::INetworkDefinition *network, - TRTBackendParams ¶ms) -{ - // This shouldn't be hard-coded rather should be set by the user. - int kMINBatchSize = 1; - int kMAXBatchSize = 32; - - if (kMAXBatchSize < params.batchSize) - { - throw std::runtime_error("Max batch size is hard-coded to 32."); - } - - bool hasDynamicShape = false; - for (int i = 0; i < network->getNbInputs(); i++) - { - auto input = network->getInput(i); - nvinfer1::Dims dims = input->getDimensions(); - const bool isDynamicInput = std::any_of(dims.d, dims.d + dims.nbDims, [](int dim) { return dim == -1; }); - if (isDynamicInput) - { - hasDynamicShape = true; - auto it = std::find(params.inputLayers.begin(), params.inputLayers.end(), std::string(input->getName())); - if (it == params.inputLayers.end()) - { - throw std::runtime_error("Undefined dynamic input shape"); - } - int pos = it - params.inputLayers.begin(); - auto inputDim = params.inputDims[pos]; - profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMIN, - GetTRTBlobDimension(kMINBatchSize, inputDim.channels, inputDim.height, - inputDim.width, params.inputLayout)); - profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kOPT, - GetTRTBlobDimension(params.batchSize, inputDim.channels, inputDim.height, - inputDim.width, params.inputLayout)); - profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMAX, - GetTRTBlobDimension(kMAXBatchSize, inputDim.channels, inputDim.height, - inputDim.width, params.inputLayout)); - } - } - return hasDynamicShape; -} - -nvinfer1::DataType GetTRTDataType(TRTBackendPrecision precision) -{ - nvinfer1::DataType dataType; - switch (precision) - { - case TRTBackendPrecision::INT8: - { - dataType = nvinfer1::DataType::kINT8; - break; - } - case TRTBackendPrecision::FP16: - { - dataType = nvinfer1::DataType::kHALF; - break; - } - case TRTBackendPrecision::FP32: - { - dataType = nvinfer1::DataType::kFLOAT; - break; - } - default: - { - dataType = nvinfer1::DataType::kFLOAT; - break; - } - } - return dataType; -} - -nvuffparser::UffInputOrder GetTRTInputOrder(TRTBackendBlobLayout layout) -{ - nvuffparser::UffInputOrder order; - switch (layout) - { - case TRTBackendBlobLayout::PLANAR: - { - order = nvuffparser::UffInputOrder::kNCHW; - break; - } - case TRTBackendBlobLayout::INTERLEAVED: - { - order = nvuffparser::UffInputOrder::kNHWC; - break; - } - default: - { - throw std::runtime_error("Only PLANAR and INTERLEAVED types allowed"); - } 
- } - return order; -} - -} // anonymous namespace - -class TRTLogger : public nvinfer1::ILogger -{ - -public: - nvinfer1::ILogger &getLogger() - { - return *this; - } - - void log(nvinfer1::ILogger::Severity severity, const char *msg) noexcept override - { - switch (severity) - { - case nvinfer1::ILogger::Severity::kINTERNAL_ERROR: - { - std::cout << msg << std::endl; - break; - } - case nvinfer1::ILogger::Severity::kERROR: - { - std::cout << msg << std::endl; - break; - } - case nvinfer1::ILogger::Severity::kWARNING: - { - std::cout << msg << std::endl; - break; - } - case nvinfer1::ILogger::Severity::kINFO: - { - std::cout << msg << std::endl; - break; - } - default: - { - std::cout << msg << std::endl; - break; - } - } - } -}; - -struct TRTImpl -{ - TRTImpl() - : m_logger(new TRTLogger()) - , m_TRTRuntime(nullptr, [](nvinfer1::IRuntime *runtime) { runtime->destroy(); }) - , m_inferenceEngine(nullptr) - , m_ownedInferenceEngine(nullptr, [](nvinfer1::ICudaEngine *eng) { eng->destroy(); }) - , m_inferContext(nullptr, [](nvinfer1::IExecutionContext *ectx) { ectx->destroy(); }) - , m_cudaStream(0) - { - } - - std::unique_ptr m_logger; - std::unique_ptr m_TRTRuntime; - nvinfer1::ICudaEngine *m_inferenceEngine; - std::unique_ptr m_ownedInferenceEngine; - std::unique_ptr m_inferContext; - - cudaStream_t m_cudaStream; - int m_bindingsCount = 0; - int m_batchSize = 1; - bool m_explicitBatch = false; - TRTBackendPrecision m_precision = TRTBackendPrecision::FP32; - std::unordered_map m_blobMap; - - void loadNetWorkFromFile(const char *modelFilePath); - void loadNetWorkFromUff(TRTBackendParams ¶ms); - void loadNetWorkFromOnnx(TRTBackendParams ¶ms); - void loadFromMemoryPointer(void *engine); - // find the input/output bindings - void setupIO(int batchSize); -}; - -void TRTImpl::loadNetWorkFromFile(const char *modelFilePath) -{ - // Initialize TRT engine and deserialize it from file - std::ifstream trtModelFStream(modelFilePath, std::ios::binary); - std::unique_ptr trtModelContent; - size_t trtModelContentSize = 0; - if (!trtModelFStream.good()) - { - std::cerr << "Model File: " << modelFilePath << std::endl; - throw std::runtime_error("TensorRT: Model file not found."); - } - else - { - trtModelFStream.seekg(0, trtModelFStream.end); - trtModelContentSize = trtModelFStream.tellg(); - trtModelFStream.seekg(0, trtModelFStream.beg); - trtModelContent.reset(new char[trtModelContentSize]); - trtModelFStream.read(trtModelContent.get(), trtModelContentSize); - trtModelFStream.close(); - std::cout << "Deserializing engine from: " << modelFilePath; - } - m_TRTRuntime.reset(nvinfer1::createInferRuntime(*(m_logger.get()))); - m_inferenceEngine = dynamic_cast( - m_TRTRuntime->deserializeCudaEngine(trtModelContent.get(), trtModelContentSize, nullptr)); - m_ownedInferenceEngine.reset(m_inferenceEngine); - m_inferContext.reset(m_inferenceEngine->createExecutionContext()); - m_inferContext->setOptimizationProfile(0); -} - -void TRTImpl::loadNetWorkFromOnnx(TRTBackendParams ¶ms) -{ - if (!params.explicitBatch) - { - std::cerr << "ONNX model only supports explicit batch size"; - } - std::ifstream f(params.enginePath); - if (f.good()) - { - loadNetWorkFromFile(params.enginePath.c_str()); - return; - } - auto builder = nvinfer1::createInferBuilder(*(m_logger.get())); - auto config = builder->createBuilderConfig(); - auto batchFlag = 1U << static_cast(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); - auto network = builder->createNetworkV2(batchFlag); - auto parser = nvonnxparser::createParser(*network, 
*(m_logger.get())); - - // Force FP32 - if (!builder->platformHasFastFp16()) - { - m_precision = TRTBackendPrecision::FP32; - } - - // Configuration - builder->setMaxBatchSize(params.batchSize); - if (!parser->parseFromFile(params.weightsPath.c_str(), 0)) - { - std::cerr << "Fail to parse"; - } - config->setMaxWorkspaceSize(1 << 30); - if (m_precision == TRTBackendPrecision::FP16) - { - config->setFlag(nvinfer1::BuilderFlag::kFP16); - } - - auto profile = builder->createOptimizationProfile(); - if (SetupProfile(profile, network, params)) - { - config->addOptimizationProfile(profile); - } - - // Build the engine - m_inferenceEngine = builder->buildEngineWithConfig(*network, *config); - if (m_inferenceEngine == nullptr) - { - throw std::runtime_error("TensorRT: unable to create engine"); - } - - m_ownedInferenceEngine.reset(m_inferenceEngine); - m_inferContext.reset(m_inferenceEngine->createExecutionContext()); - - network->destroy(); - builder->destroy(); - config->destroy(); - - auto serializedEngine = m_inferenceEngine->serialize(); - WriteSerializedEngineToFile(static_cast(serializedEngine->data()), serializedEngine->size(), - params.enginePath); -} - -void TRTImpl::loadNetWorkFromUff(TRTBackendParams ¶ms) -{ - auto builder = nvinfer1::createInferBuilder(*(m_logger.get())); - auto config = builder->createBuilderConfig(); - auto batchFlag = params.explicitBatch - ? 1U << static_cast(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH) - : 0U; - auto network = builder->createNetworkV2(batchFlag); - auto parser = nvuffparser::createUffParser(); - - std::ifstream f(params.enginePath); - if (f.good()) - { - loadNetWorkFromFile(params.enginePath.c_str()); - return; - } - nvinfer1::DataType dataType = GetTRTDataType(params.precision); - - // Force FP32 - if (!builder->platformHasFastFp16()) - { - dataType = nvinfer1::DataType::kFLOAT; - m_precision = TRTBackendPrecision::FP32; - } - - // Register uff input - for (int i = 0; i < params.inputLayers.size(); i++) - { - if (params.explicitBatch) - { - parser->registerInput( - params.inputLayers[i].c_str(), - GetTRTBlobDimension(params.batchSize, params.inputDims[i].channels, params.inputDims[i].height, - params.inputDims[i].width, params.inputLayout), - GetTRTInputOrder(params.inputLayout)); - } - else - { - parser->registerInput(params.inputLayers[i].c_str(), - GetTRTBlobDimension(params.inputDims[i].channels, params.inputDims[i].height, - params.inputDims[i].width, params.inputLayout), - GetTRTInputOrder(params.inputLayout)); - } - } - - // Register uff output - for (int i = 0; i < params.outputLayers.size(); i++) - { - parser->registerOutput(params.outputLayers[i].c_str()); - } - - // Parse uff model - if (!parser->parse(params.weightsPath.c_str(), *network, dataType)) - { - std::cerr << "Fail to parse"; - } - - // Configuration - if (params.explicitBatch) - { - auto profile = builder->createOptimizationProfile(); - if (SetupProfile(profile, network, params)) - { - config->addOptimizationProfile(profile); - } - } - else - { - builder->setMaxBatchSize(params.batchSize); - } - - config->setMaxWorkspaceSize(1 << 30); - if (m_precision == TRTBackendPrecision::FP16) - { - config->setFlag(nvinfer1::BuilderFlag::kFP16); - } - - // Build the engine - m_inferenceEngine = builder->buildEngineWithConfig(*network, *config); - if (m_inferenceEngine == nullptr) - { - throw std::runtime_error("TensorRT: unable to create engine"); - } - - m_ownedInferenceEngine.reset(m_inferenceEngine); - m_inferContext.reset(m_inferenceEngine->createExecutionContext()); - 
- network->destroy(); - builder->destroy(); - config->destroy(); - - auto serializedEngine = m_inferenceEngine->serialize(); - WriteSerializedEngineToFile(static_cast(serializedEngine->data()), serializedEngine->size(), - params.enginePath); -} - -void TRTImpl::loadFromMemoryPointer(void *engine) -{ - m_inferenceEngine = static_cast(engine); - m_inferContext.reset(m_inferenceEngine->createExecutionContext()); -} - -void TRTImpl::setupIO(int batchSize) -{ - // @TODO: use getBindingDimensions to avoid re-setting the IO. - m_bindingsCount = m_inferenceEngine->getNbBindings(); - for (int i = 0; i < m_bindingsCount; i++) - { - m_blobMap[std::string(m_inferenceEngine->getBindingName(i))] = i; - if (m_inferenceEngine->bindingIsInput(i)) - { - nvinfer1::Dims dims_i(m_inferenceEngine->getBindingDimensions(i)); - nvinfer1::Dims4 inputDims{batchSize, dims_i.d[1], dims_i.d[2], dims_i.d[3]}; - m_inferContext->setBindingDimensions(i, inputDims); - } - } -} - -TRTBackend::TRTBackend(const char *modelFilePath, TRTBackendPrecision precision, int batchSize, bool explicitBatch) - : m_pImpl(new TRTImpl()) -{ - m_pImpl->m_precision = precision; - m_pImpl->m_batchSize = batchSize; - m_pImpl->m_explicitBatch = explicitBatch; - m_pImpl->loadNetWorkFromFile(modelFilePath); - m_pImpl->setupIO(m_pImpl->m_batchSize); -} - -TRTBackend::TRTBackend(TRTBackendParams &inputParams) - : m_pImpl(new TRTImpl()) -{ - m_pImpl->m_precision = inputParams.precision; - m_pImpl->m_batchSize = inputParams.batchSize; - m_pImpl->m_explicitBatch = inputParams.explicitBatch; - switch (inputParams.modelType) - { - case ModelType::ONNX: - { - m_pImpl->loadNetWorkFromOnnx(inputParams); - break; - } - case ModelType::UFF: - { - m_pImpl->loadNetWorkFromUff(inputParams); - break; - } - case ModelType::TRT_ENGINE: - { - m_pImpl->loadNetWorkFromFile(inputParams.enginePath.c_str()); - break; - } - case ModelType::TRT_ENGINE_IN_MEMORY: - { - m_pImpl->loadFromMemoryPointer(inputParams.trtEngineInMemory); - break; - } - default: - { - throw std::runtime_error( - "Only Model types ONNX, UFF, TensorRT " - "serialized engines and a pointer to deserialized " - "ICudaEngine are supported\n"); - } - } - m_pImpl->setupIO(m_pImpl->m_batchSize); -} - -TRTBackend::~TRTBackend() {} - -void TRTBackend::infer(void **buffer) -{ - bool success = true; - if (!m_pImpl->m_inferenceEngine->hasImplicitBatchDimension()) - { - m_pImpl->m_inferContext->enqueueV2(buffer, m_pImpl->m_cudaStream, nullptr); - } - else - { - m_pImpl->m_inferContext->enqueue(m_pImpl->m_batchSize, buffer, m_pImpl->m_cudaStream, nullptr); - } - - if (!success) - { - throw std::runtime_error("TensorRT: Inference failed"); - } -} - -void TRTBackend::infer(void **buffer, int batchSize, cudaStream_t stream) -{ - //@TODO: fix kMin, kOpt, kMax batch size in SetupProfile() call and then add a check here. 
- m_pImpl->setupIO(batchSize); - - bool success = true; - if (!m_pImpl->m_inferenceEngine->hasImplicitBatchDimension()) - { - m_pImpl->m_inferContext->enqueueV2(buffer, stream, nullptr); - } - else - { - m_pImpl->m_inferContext->enqueue(batchSize, buffer, stream, nullptr); - } - - if (!success) - { - throw std::runtime_error("TensorRT: Inference failed"); - } -} - -cudaStream_t TRTBackend::getCUDAStream() const -{ - return m_pImpl->m_cudaStream; -} - -void TRTBackend::setCUDAStream(cudaStream_t stream) -{ - m_pImpl->m_cudaStream = stream; -} - -int TRTBackend::getBlobCount() const -{ - return m_pImpl->m_bindingsCount; -} - -TRTBackendBlobSize TRTBackend::getTRTBackendBlobSize(int blobIndex) const -{ - if (blobIndex >= m_pImpl->m_bindingsCount) - { - throw std::runtime_error("blobIndex out of range"); - } - auto dim = m_pImpl->m_inferenceEngine->getBindingDimensions(blobIndex); - TRTBackendBlobSize blobSize; - if (dim.nbDims == 2) - { - blobSize = {1, dim.d[0], dim.d[1]}; - } - else if (dim.nbDims == 3) - { - blobSize = {dim.d[0], dim.d[1], dim.d[2]}; - } - else if (dim.nbDims == 4) - { - blobSize = {dim.d[1], dim.d[2], dim.d[3]}; - } - else - { - throw std::runtime_error("Unknown TensorRT binding dimension!"); - } - return blobSize; -} - -int TRTBackend::getBlobLinearSize(int blobIndex) const -{ - const TRTBackendBlobSize shape = getTRTBackendBlobSize(blobIndex); - nvinfer1::Dims3 dims_val{shape.channels, shape.height, shape.width}; - int blobSize = 1; - for (int i = 0; i < 3; i++) - { - blobSize *= dims_val.d[i] <= 0 ? 1 : dims_val.d[i]; - } - return blobSize; -} - -int TRTBackend::getBlobIndex(const char *blobName) const -{ - auto blobItr = m_pImpl->m_blobMap.find(std::string(blobName)); - if (blobItr == m_pImpl->m_blobMap.end()) - { - throw std::runtime_error("blobName not found"); - } - return blobItr->second; -} - -bool TRTBackend::bindingIsInput(const int index) const -{ - return m_pImpl->m_inferenceEngine->bindingIsInput(index); -} - -} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/extensions/ess/components/ess_inference.cpp b/isaac_ros_ess/gxf/ess/extensions/ess/components/ess_inference.cpp new file mode 100644 index 0000000..49de215 --- /dev/null +++ b/isaac_ros_ess/gxf/ess/extensions/ess/components/ess_inference.cpp @@ -0,0 +1,573 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +#include "extensions/ess/components/ess_inference.hpp" + +#include +#include +#include +#include + +#include "extensions/tensorops/components/ImageUtils.hpp" +#include "extensions/tensorops/core/Core.h" +#include "extensions/tensorops/core/Image.h" +#include "gems/gxf_helpers/expected_macro.hpp" +#include "gems/hash/hash_file.hpp" +#include "gems/video_buffer/allocator.hpp" +#include "gxf/cuda/cuda_stream_id.hpp" +#include "gxf/cuda/cuda_stream_pool.hpp" +#include "gxf/multimedia/camera.hpp" +#include "gxf/multimedia/video.hpp" +#include "gxf/std/tensor.hpp" +#include "gxf/std/timestamp.hpp" + +namespace nvidia { +namespace isaac { + +namespace detail { + +// Function to bind a cuda stream with cid into downstream message +gxf_result_t BindCudaStream(gxf::Entity& message, gxf_uid_t cid) { + if (cid == kNullUid) { + GXF_LOG_ERROR("stream_cid is null"); + return GXF_FAILURE; + } + auto output_stream_id = message.add("stream"); + if (!output_stream_id) { + GXF_LOG_ERROR("failed to add cudastreamid."); + return GXF_FAILURE; + } + output_stream_id.value()->stream_cid = cid; + return GXF_SUCCESS; +} + +// Function to record a new cuda event +gxf_result_t RecordCudaEvent(gxf::Entity& message, gxf::Handle& stream) { + // Create a new event + cudaEvent_t cuda_event; + cudaEventCreateWithFlags(&cuda_event, 0); + gxf::CudaEvent event; + auto ret = event.initWithEvent(cuda_event, stream->dev_id(), [](auto) {}); + if (!ret) { + GXF_LOG_ERROR("failed to init cuda event"); + return GXF_FAILURE; + } + // Record the event + // Can define []() { GXF_LOG_DEBUG("tensorops event synced"); } + // as callback func for debug purpose + ret = stream->record(event.event().value(), + [event = cuda_event, entity = message.clone().value()](auto) { + cudaEventDestroy(event); + }); + if (!ret) { + GXF_LOG_ERROR("record event failed"); + return ret.error(); + } + return GXF_SUCCESS; +} + +template +gxf_result_t PassthroughComponents(gxf::Entity& output, gxf::Entity& input, + const char* name = nullptr) { + auto maybe_component = input.get(name); + if (maybe_component) { + auto output_component = output.add(name != nullptr ? 
name : maybe_component.value().name()); + if (!output_component) { + GXF_LOG_ERROR("add output component failed."); + return output_component.error(); + } + *(output_component.value()) = *(maybe_component.value()); + } else { + GXF_LOG_INFO("component %s not found.", name); + } + + return GXF_SUCCESS; +} + +gxf::Expected ComputeEnginePath( + const ess::ModelBuildParams& modelBuildParams) { + const SHA256::String onnx_hash = + GXF_UNWRAP_OR_RETURN(hash_file(modelBuildParams.onnx_file_path.c_str())); + + std::string target_dir = "/tmp"; + + char* test_tmpdir = std::getenv("TEST_TMPDIR"); + + if (test_tmpdir) { + target_dir = test_tmpdir; + } else { + char* tmpdir = std::getenv("TMPDIR"); + + if (tmpdir) { + target_dir = tmpdir; + } + } + + std::stringstream path; + + path << target_dir << "/engine_" << onnx_hash.c_str() << + "_" << modelBuildParams.enable_fp16 << + "_" << modelBuildParams.max_workspace_size << + "_" << modelBuildParams.dla_core << + "_" << ess::InferencerVersion << + ".engine"; + + return path.str(); +} + +} // namespace detail + +gxf_result_t ESSInference::registerInterface(gxf::Registrar* registrar) { + gxf::Expected result; + + result &= registrar->parameter( + left_image_name_, "left_image_name", "Left image name", + "The name of the left image to be received", + gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); + result &= registrar->parameter( + right_image_name_, "right_image_name", "Right image name", + "The name of the right image to be received", + gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); + result &= registrar->parameter( + output_name_, "output_name", "Output name", + "The name of the tensor to be passed to next node"); + result &= registrar->parameter( + stream_pool_, "stream_pool", "Stream pool", + "The Cuda Stream pool for allocate cuda stream", + gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); + result &= registrar->parameter( + pool_, "pool", "Pool", + "Memory pool for allocating output data"); + result &= registrar->parameter( + left_image_receiver_, "left_image_receiver", "Left image receiver", + "Receiver to get the left image"); + result &= registrar->parameter( + right_image_receiver_, "right_image_receiver", "Right image receiver", + "Receiver to get the right image"); + result &= registrar->parameter( + output_transmitter_, "output_transmitter", "Output transmitter", + "Transmitter to send the data"); + result &= registrar->parameter( + confidence_transmitter_, "confidence_transmitter", "Confidence transmitter", + "Transmitter to send the confidence data"); + result &= registrar->parameter( + image_type_, "image_type", "Image type", + "Type of input image: BGR_U8 or RGB_U8"); + result &= registrar->parameter( + pixel_mean_, "pixel_mean", "Pixel mean", + "The mean for each channel"); + result &= registrar->parameter( + normalization_, "normalization", "Normalization", + "The normalization for each channel"); + result &= registrar->parameter( + standard_deviation_, "standard_deviation", "Standard deviation", + "The standard deviation for each channel"); + result &= registrar->parameter( + max_batch_size_, "max_batch_size", "Max batch size", + "The max batch size to run inference on"); + result &= registrar->parameter( + model_input_type_, "model_input_type", "Model input type", + "The model input image: BGR_U8 or RGB_U8"); + + result &= registrar->parameter( + force_engine_update_, "force_engine_update", "Force engine update", + "Flag that indicates always update engine", false); + 
result &= registrar->parameter( + onnx_file_path_, "onnx_file_path", "ONNX file path", + "The path to the onnx model file"); + result &= registrar->parameter( + enable_fp16_, "enable_fp16", "Enable FP16", + "Flag to enable FP16 in engine generation", true); + result &= registrar->parameter( + max_workspace_size_, "max_workspace_size", "Max Workspace Size", + "Size of working space in bytes. Default to 64MB", 64L * (1L << 20)); + result &= registrar->parameter( + dla_core_, "dla_core", "DLA Core", + "DLA Core to use. Fallback to GPU is always enabled. " + "Default to use GPU only.", + gxf::Registrar::NoDefaultParameter(), + GXF_PARAMETER_FLAGS_OPTIONAL); + + result &= registrar->parameter( + engine_file_path_, "engine_file_path", "Engine file path", + "The path to the serialized TRT engine (default is to auto-generated unique name)", + gxf::Registrar::NoDefaultParameter(), + GXF_PARAMETER_FLAGS_OPTIONAL); + result &= registrar->parameter( + input_layers_name_, "input_layers_name", "Input layer names", + "The names of the input layers"); + result &= registrar->parameter( + output_layers_name_, "output_layers_name", "Output layer names", + "The names of the output layers"); + + result &= registrar->parameter( + preprocess_type_, "preprocess_type", "Preprocess type", + "The type of ESS preprocessing: RESIZE / CENTER_CROP"); + result &= registrar->parameter( + timestamp_policy_, "timestamp_policy", "Timestamp policy", + "Input channel to get timestamp 0(left)/1(right)", 0); + + return gxf::ToResultCode(result); +} + +gxf_result_t ESSInference::start() { + using ImageType = cvcore::tensor_ops::ImageType; + + // Allocate cuda stream using stream pool if necessary + if (stream_pool_.try_get()) { + auto stream = stream_pool_.try_get().value()->allocateStream(); + if (!stream) { + GXF_LOG_ERROR("allocating stream failed."); + return GXF_FAILURE; + } + cuda_stream_ = std::move(stream.value()); + if (!cuda_stream_->stream()) { + GXF_LOG_ERROR("allocated stream is not initialized."); + return GXF_FAILURE; + } + } + // Setting image pre-processing params for ESS + const auto& pixel_mean_vec = pixel_mean_.get(); + const auto& normalization_vec = normalization_.get(); + const auto& standard_deviation_vec = standard_deviation_.get(); + if (pixel_mean_vec.size() != 3 || normalization_vec.size() != 3 || + standard_deviation_vec.size() != 3) { + GXF_LOG_ERROR("Invalid preprocessing params."); + return GXF_FAILURE; + } + if (image_type_.get() == "BGR_U8") { + preprocessor_params_.imgType = ImageType::BGR_U8; + } else if (image_type_.get() == "RGB_U8") { + preprocessor_params_.imgType = ImageType::RGB_U8; + } else { + GXF_LOG_INFO("Wrong input image type. BGR_U8 and RGB_U8 are only supported."); + return GXF_FAILURE; + } + std::copy(pixel_mean_vec.begin(), pixel_mean_vec.end(), preprocessor_params_.pixelMean); + std::copy(normalization_vec.begin(), + normalization_vec.end(), + preprocessor_params_.normalization); + std::copy(standard_deviation_vec.begin(), + standard_deviation_vec.end(), + preprocessor_params_.stdDev); + + // Setting model input params for ESS + model_input_params_.maxBatchSize = max_batch_size_.get(); + if (model_input_type_.get() == "BGR_U8") { + model_input_params_.modelInputType = ImageType::BGR_U8; + } else if (model_input_type_.get() == "RGB_U8") { + model_input_params_.modelInputType = ImageType::RGB_U8; + } else { + GXF_LOG_INFO("Wrong model input type. 
BGR_U8 and RGB_U8 are only supported."); + return GXF_FAILURE; + } + + // Setting engine build params + auto maybe_dla_core = dla_core_.try_get(); + const int64_t dla_core = dla_core_.try_get().value_or(-1); + + model_build_params_ = {force_engine_update_.get(), onnx_file_path_.get(), + enable_fp16_.get(), max_workspace_size_.get(), dla_core}; + + gxf::Expected maybe_engine_file_path = engine_file_path_.try_get(); + std::string engine_file_path; + + if (maybe_engine_file_path) { + engine_file_path = maybe_engine_file_path.value(); + } else { + engine_file_path = + GXF_UNWRAP_OR_RETURN(detail::ComputeEnginePath(model_build_params_)); + } + + // Setting inference params for ESS + inference_params_ = { + engine_file_path, input_layers_name_.get(), output_layers_name_.get()}; + + // Setting extra params for ESS + if (preprocess_type_.get() == "RESIZE") { + extra_params_ = {ess::PreProcessType::RESIZE}; + } else if (preprocess_type_.get() == "CENTER_CROP") { + extra_params_ = {ess::PreProcessType::CENTER_CROP}; + } else { + GXF_LOG_ERROR("Invalid preprocessing type."); + return GXF_FAILURE; + } + + // Setting ESS object with the provided params + ess_.reset(new ess::ESS(preprocessor_params_, model_input_params_, + model_build_params_, inference_params_, extra_params_)); + + return GXF_SUCCESS; +} + +gxf_result_t ESSInference::tick() { + using ImageType = cvcore::tensor_ops::ImageType; + using TensorLayout = cvcore::tensor_ops::TensorLayout; + using ChannelType = cvcore::tensor_ops::ChannelType; + using ChannelCount = cvcore::tensor_ops::ChannelCount; + using Tensor_NHWC_C1_F32 = cvcore::tensor_ops::Tensor; + using Tensor_NCHW_C3_F32 = cvcore::tensor_ops::Tensor; + using Tensor_NHWC_C3_U8 = cvcore::tensor_ops::Tensor; + + // Get a CUDA stream for execution + cudaStream_t cuda_stream = 0; + if (!cuda_stream_.is_null()) { + cuda_stream = cuda_stream_->stream().value(); + } + // Receiving the data + auto inputLeftMessage = left_image_receiver_->receive(); + if (!inputLeftMessage) { + return GXF_FAILURE; + } + if (cuda_stream != 0) { + detail::RecordCudaEvent(inputLeftMessage.value(), cuda_stream_); + auto inputLeftStreamId = inputLeftMessage.value().get("stream"); + if (inputLeftStreamId) { + auto inputLeftStream = gxf::Handle::Create( + inputLeftStreamId.value().context(), + inputLeftStreamId.value()->stream_cid); + // NOTE: This is an expensive call. It will halt the current CPU thread until all events + // previously associated with the stream are cleared + if (!inputLeftStream.value()->syncStream()) { + GXF_LOG_ERROR("sync left stream failed."); + return GXF_FAILURE; + } + } + } + + auto inputRightMessage = right_image_receiver_->receive(); + if (!inputRightMessage) { + return GXF_FAILURE; + } + if (cuda_stream != 0) { + detail::RecordCudaEvent(inputRightMessage.value(), cuda_stream_); + auto inputRightStreamId = inputRightMessage.value().get("stream"); + if (inputRightStreamId) { + auto inputRightStream = gxf::Handle::Create( + inputRightStreamId.value().context(), + inputRightStreamId.value()->stream_cid); + // NOTE: This is an expensive call. It will halt the current CPU thread until all events + // previously associated with the stream are cleared + if (!inputRightStream.value()->syncStream()) { + GXF_LOG_ERROR("sync right stream failed."); + return GXF_FAILURE; + } + } + } + + auto maybeLeftName = left_image_name_.try_get(); + auto leftInputBuffer = inputLeftMessage.value().get( + maybeLeftName ? 
maybeLeftName.value().c_str() : nullptr); + if (!leftInputBuffer) { + return GXF_FAILURE; + } + auto maybeRightName = right_image_name_.try_get(); + auto rightInputBuffer = inputRightMessage.value().get( + maybeRightName ? maybeRightName.value().c_str() : nullptr); + if (!rightInputBuffer) { + return GXF_FAILURE; + } + if (leftInputBuffer.value()->storage_type() != gxf::MemoryStorageType::kDevice || + rightInputBuffer.value()->storage_type() != gxf::MemoryStorageType::kDevice) { + GXF_LOG_ERROR("input images must be on GPU."); + return GXF_FAILURE; + } + + const auto& leftBufferInfo = leftInputBuffer.value()->video_frame_info(); + const auto& rightBufferInfo = rightInputBuffer.value()->video_frame_info(); + if (leftBufferInfo.width != rightBufferInfo.width || + leftBufferInfo.height != rightBufferInfo.height || + leftBufferInfo.color_format != rightBufferInfo.color_format) { + GXF_LOG_ERROR("left/right images mismatch."); + return GXF_FAILURE; + } + const size_t outputWidth = ess_->getModelOutputWidth(); + const size_t outputHeight = ess_->getModelOutputHeight(); + + // Creating GXF Tensor or VideoBuffer to hold the data to be transmitted + gxf::Expected outputMessage = gxf::Entity::New(context()); + if (!outputMessage) { + return outputMessage.error(); + } + + // Creating GXF tensor to hold the data to be transmitted + auto videoBuffer = outputMessage->add(output_name_.get().c_str()); + if (!videoBuffer) { + GXF_LOG_ERROR("ESS::tick ==> Failed to create video buffer in output message."); + return videoBuffer.error(); + } + auto result = AllocateUnpaddedVideoBuffer( + videoBuffer.value(), outputWidth, outputHeight, gxf::MemoryStorageType::kDevice, pool_); + if (!result) { + GXF_LOG_ERROR("ESS::tick ==> Failed to allocate video buffer in output message."); + return GXF_FAILURE; + } + using D = typename cvcore::tensor_ops::detail::ChannelTypeToNative< + cvcore::tensor_ops::ImageTraits::CT>::Type; + const auto info = videoBuffer.value()->video_frame_info(); + auto pointer = reinterpret_cast(videoBuffer.value()->pointer()); + if (!pointer) { + GXF_LOG_ERROR("ESS::tick ==> Failed to reinterpret video buffer pointer."); + return GXF_FAILURE; + } + const auto& color_planes = info.color_planes; + auto outputImage = cvcore::tensor_ops::Image(info.width, info.height, + color_planes[0].stride, pointer, false); + + // Creating GXF VideoBuffer to hold confidence map to be transmitted + gxf::Expected outputConfMessage = gxf::Entity::New(context()); + if (!outputConfMessage) { + return outputConfMessage.error(); + } + auto confBuffer = outputConfMessage->add(output_name_.get().c_str()); + if (!confBuffer) { + return confBuffer.error(); + } + result = AllocateUnpaddedVideoBuffer( + confBuffer.value(), outputWidth, outputHeight, gxf::MemoryStorageType::kDevice, pool_); + if (!result) { + GXF_LOG_ERROR("ESS::tick ==> Failed to allocate video buffer in confidence message."); + return GXF_FAILURE; + } + + pointer = reinterpret_cast(confBuffer.value()->pointer()); + if (!pointer) { + return GXF_FAILURE; + } + auto outputConf = cvcore::tensor_ops::Image(info.width, info.height, + color_planes[0].stride, pointer, false); + + // Creating CVCore Tensors to hold the input and output data + Tensor_NHWC_C1_F32 outputImageDevice( + outputWidth, outputHeight, 1, + outputImage.getData(), false); + + Tensor_NHWC_C1_F32 outputConfDevice( + outputWidth, outputHeight, 1, + outputConf.getData(), false); + + // Running the inference + auto inputColorFormat = leftBufferInfo.color_format; + if (inputColorFormat == 
gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB || + inputColorFormat == gxf::VideoFormat::GXF_VIDEO_FORMAT_BGR) { + Tensor_NHWC_C3_U8 leftImageDevice( + leftBufferInfo.width, leftBufferInfo.height, 1, reinterpret_cast( + leftInputBuffer.value()->pointer()), false); + Tensor_NHWC_C3_U8 rightImageDevice( + leftBufferInfo.width, leftBufferInfo.height, 1, reinterpret_cast + (rightInputBuffer.value()->pointer()), false); + ess_->execute(outputImageDevice, outputConfDevice, leftImageDevice, + rightImageDevice, cuda_stream); + } else if (inputColorFormat == gxf::VideoFormat::GXF_VIDEO_FORMAT_R32_G32_B32 || + inputColorFormat == gxf::VideoFormat::GXF_VIDEO_FORMAT_B32_G32_R32) { + Tensor_NCHW_C3_F32 leftImageDevice( + leftBufferInfo.width, leftBufferInfo.height, 1, reinterpret_cast( + leftInputBuffer.value()->pointer()), false); + Tensor_NCHW_C3_F32 rightImageDevice( + leftBufferInfo.width, leftBufferInfo.height, 1, reinterpret_cast( + rightInputBuffer.value()->pointer()), false); + ess_->execute(outputImageDevice, outputConfDevice, leftImageDevice, + rightImageDevice, cuda_stream); + } else { + GXF_LOG_ERROR("invalid input image type."); + return GXF_FAILURE; + } + + // Allocate a cuda event that can be used to record on each tick + if (!cuda_stream_.is_null()) { + detail::BindCudaStream(outputMessage.value(), cuda_stream_.cid()); + detail::RecordCudaEvent(outputMessage.value(), cuda_stream_); + } + + // Pass down timestamp if necessary + auto maybeDaqTimestamp = + timestamp_policy_.get() == 0 ? inputLeftMessage.value().get() + : inputRightMessage.value().get(); + if (maybeDaqTimestamp) { + auto outputTimestamp = outputMessage.value().add( + maybeDaqTimestamp.value().name()); + if (!outputTimestamp) { + return outputTimestamp.error(); + } + *outputTimestamp.value() = *maybeDaqTimestamp.value(); + + auto confTimestamp = outputConfMessage.value().add( + maybeDaqTimestamp.value().name()); + if (!confTimestamp) { + return confTimestamp.error(); + } + *confTimestamp.value() = *maybeDaqTimestamp.value(); + } + + detail::PassthroughComponents(outputMessage.value(), inputLeftMessage.value(), + "sequence_number"); + detail::PassthroughComponents(outputMessage.value(), inputRightMessage.value(), + "extrinsics"); + detail::PassthroughComponents(outputConfMessage.value(), inputLeftMessage.value(), + "sequence_number"); + detail::PassthroughComponents(outputConfMessage.value(), inputRightMessage.value(), + "extrinsics"); + + // use intrinsics scaling since input and output resolution may differ + auto maybe_input_model = inputLeftMessage->get("intrinsics"); + if (maybe_input_model) { + auto maybe_output_model = outputMessage->add("intrinsics"); + if (!maybe_output_model) { + GXF_LOG_ERROR("creating output intrinsics failed."); + return gxf::ToResultCode(maybe_output_model); + } + auto maybe_output_conf = outputConfMessage->add("intrinsics"); + if (!maybe_output_conf) { + GXF_LOG_ERROR("creating conf intrinsics failed."); + return gxf::ToResultCode(maybe_output_conf); + } + gxf::Handle input_model = maybe_input_model.value(); + gxf::Handle output_model = maybe_output_model.value(); + gxf::Handle output_conf = maybe_output_conf.value(); + + gxf::Expected maybe_scaled_model = + tensor_ops::GetScaledCameraModel(*input_model, outputWidth, outputHeight, false); + + if (!maybe_scaled_model) { + GXF_LOG_ERROR("computing output intrinsics failed."); + return gxf::ToResultCode(maybe_scaled_model); + } + + *output_model = *maybe_scaled_model; + *output_conf = *maybe_scaled_model; + } else { + GXF_LOG_WARNING("Input message is 
missing intrinsics!"); + } + + // Publish the data and confidence + GXF_RETURN_IF_ERROR(gxf::ToResultCode(output_transmitter_->publish(outputMessage.value()))); + GXF_RETURN_IF_ERROR(gxf::ToResultCode(confidence_transmitter_->publish( + outputConfMessage.value()))); + + return GXF_SUCCESS; +} + +gxf_result_t ESSInference::stop() { + ess_.reset(nullptr); + return GXF_SUCCESS; +} + +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/ess/ESS.hpp b/isaac_ros_ess/gxf/ess/extensions/ess/components/ess_inference.hpp similarity index 76% rename from isaac_ros_ess/gxf/ess/extensions/ess/ESS.hpp rename to isaac_ros_ess/gxf/ess/extensions/ess/components/ess_inference.hpp index a4d32f0..2fe996d 100644 --- a/isaac_ros_ess/gxf/ess/extensions/ess/ESS.hpp +++ b/isaac_ros_ess/gxf/ess/extensions/ess/components/ess_inference.hpp @@ -15,16 +15,15 @@ // // SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_ESS_HPP -#define NVIDIA_CVCORE_ESS_HPP +#pragma once #include #include +#include +#include #include -#include - -#include "extensions/tensor_ops/ImageAdapter.hpp" +#include "extensions/ess/inference/ESS.h" #include "gxf/cuda/cuda_stream_pool.hpp" #include "gxf/std/allocator.hpp" #include "gxf/std/codelet.hpp" @@ -32,13 +31,13 @@ #include "gxf/std/transmitter.hpp" namespace nvidia { -namespace cvcore { +namespace isaac { // CV-Core ESS GXF Codelet -class ESS : public gxf::Codelet { -public: - ESS() = default; - ~ESS() = default; +class ESSInference : public gxf::Codelet { + public: + ESSInference() = default; + ~ESSInference() = default; gxf_result_t registerInterface(gxf::Registrar* registrar) override; gxf_result_t initialize() override { @@ -52,17 +51,19 @@ class ESS : public gxf::Codelet { gxf_result_t tick() override; gxf_result_t stop() override; -private: + private: // cvcore image pre-processing params for ESS - ::cvcore::ImagePreProcessingParams preProcessorParams; + cvcore::tensor_ops::ImagePreProcessingParams preprocessor_params_; // cvcore model input params for ESS - ::cvcore::ModelInputParams modelInputParams; + ess::ModelInputParams model_input_params_; + // cvcore model build params for ESS + ess::ModelBuildParams model_build_params_; // cvcore inference params for ESS - ::cvcore::ModelInferenceParams inferenceParams; + ess::ModelInferenceParams inference_params_; // extra params for ESS - ::cvcore::ess::ESSPreProcessorParams extraParams; + ess::ESSPreProcessorParams extra_params_; // cvcore ESS object - std::unique_ptr<::cvcore::ess::ESS> objESS; + std::unique_ptr ess_; // The name of the input left image tensor gxf::Parameter left_image_name_; @@ -80,8 +81,8 @@ class ESS : public gxf::Codelet { gxf::Parameter> right_image_receiver_; // Data transmitter to send the data gxf::Parameter> output_transmitter_; - // Image adapter for output image - gxf::Parameter> output_adapter_; + // Confidence transmitter + gxf::Parameter> confidence_transmitter_; // Pre-processing params for ESS gxf::Parameter image_type_; @@ -91,10 +92,15 @@ class ESS : public gxf::Codelet { // Model input params for ESS gxf::Parameter max_batch_size_; - gxf::Parameter input_layer_width_; - gxf::Parameter input_layer_height_; gxf::Parameter model_input_type_; + // Model build params + gxf::Parameter force_engine_update_; + gxf::Parameter onnx_file_path_; + gxf::Parameter enable_fp16_; + gxf::Parameter max_workspace_size_; + gxf::Parameter dla_core_; + // Inference params for ESS gxf::Parameter engine_file_path_; gxf::Parameter> input_layers_name_; @@ -103,17 +109,11 @@ class ESS 
: public gxf::Codelet { // Extra Pre-process param gxf::Parameter preprocess_type_; - // Output params - gxf::Parameter output_width_; - gxf::Parameter output_height_; - // Decide which timestamp to pass down gxf::Parameter timestamp_policy_; gxf::Handle cuda_stream_ = nullptr; }; -} // namespace cvcore -} // namespace nvidia - -#endif +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIImageWarp.h b/isaac_ros_ess/gxf/ess/extensions/ess/ess.cpp similarity index 52% rename from isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIImageWarp.h rename to isaac_ros_ess/gxf/ess/extensions/ess/ess.cpp index 71d098b..f46610d 100644 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/vpi/VPIImageWarp.h +++ b/isaac_ros_ess/gxf/ess/extensions/ess/ess.cpp @@ -1,5 +1,5 @@ // SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,24 +14,18 @@ // limitations under the License. // // SPDX-License-Identifier: Apache-2.0 -#ifndef CVCORE_VPIIMAGEWARP_H -#define CVCORE_VPIIMAGEWARP_H -#include -#include -#include "cv/tensor_ops/IImageWarp.h" -#include "cv/tensor_ops/Errors.h" -#include +#include "extensions/ess/components/ess_inference.hpp" +#include "gxf/std/extension_factory_helper.hpp" -namespace cvcore { namespace tensor_ops { +GXF_EXT_FACTORY_BEGIN() -struct VPIImageWarp : public IImageWarp -{ - ~VPIImageWarp() = default; +GXF_EXT_FACTORY_SET_INFO(0xce7c6985267a4ec7, 0xa073030e16e49f29, "ESS", + "Extension containing ESS related components", + "Isaac SDK", "2.0.0", "LICENSE"); - VPIPayload payload; -}; +GXF_EXT_FACTORY_ADD(0x1aa1eea914344afe, 0x97fddaaedb594120, + nvidia::isaac::ESSInference, nvidia::gxf::Codelet, + "ESS GXF Extension"); -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_VPIIMAGEWARP_H +GXF_EXT_FACTORY_END() diff --git a/isaac_ros_ess/gxf/ess/extensions/ess/inference/ESS.cpp b/isaac_ros_ess/gxf/ess/extensions/ess/inference/ESS.cpp new file mode 100644 index 0000000..05efa75 --- /dev/null +++ b/isaac_ros_ess/gxf/ess/extensions/ess/inference/ESS.cpp @@ -0,0 +1,267 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
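// Because the published disparity and confidence maps are at the network output
// resolution rather than the camera resolution, tick() above republishes the left
// camera intrinsics through tensor_ops::GetScaledCameraModel. A minimal sketch of
// the underlying scaling rule, using a hypothetical PinholeModel struct (the real
// gxf::CameraModel layout may differ):
#include <cstdint>

struct PinholeModel {
  double fx, fy;           // focal lengths in pixels
  double cx, cy;           // principal point in pixels
  uint32_t width, height;  // image resolution the intrinsics refer to
};

// Pinhole intrinsics scale linearly with the image dimensions.
PinholeModel ScaleToResolution(const PinholeModel& in, uint32_t out_w, uint32_t out_h) {
  const double sx = static_cast<double>(out_w) / in.width;
  const double sy = static_cast<double>(out_h) / in.height;
  return {in.fx * sx, in.fy * sy, in.cx * sx, in.cy * sy, out_w, out_h};
}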
+// +// SPDX-License-Identifier: Apache-2.0 + +#include "extensions/ess/inference/ESS.h" + +#include +#include +#include + +#include "extensions/tensorops/core/CVError.h" +#include "extensions/tensorops/core/Image.h" +#include "extensions/tensorops/core/ImageUtils.h" +#include "extensions/tensorops/core/Memory.h" +#include "gems/dnn_inferencer/inferencer/IInferenceBackend.h" +#include "gems/dnn_inferencer/inferencer/Inferencer.h" + +namespace nvidia { +namespace isaac { +namespace ess { +using ImagePreProcessingParams = cvcore::tensor_ops::ImagePreProcessingParams; +using TensorRTInferenceParams = cvcore::inferencer::TensorRTInferenceParams; +using InferenceBackendClient = cvcore::inferencer::InferenceBackendClient; +using InferenceBackendFactory = cvcore::inferencer::InferenceBackendFactory; +using TRTInferenceType = cvcore::inferencer::TRTInferenceType; +using ErrorCode = cvcore::tensor_ops::ErrorCode; + +/* Default parameters used for the model provided*/ +const ImagePreProcessingParams defaultPreProcessorParams = { + ImageType::BGR_U8, /**< Input type of image.*/ + {-128, -128, -128}, /**< Mean value */ + {1.0 / 255.0, 1.0 / 255.0, 1.0 / 255.0}, /**< Normalization factor */ + {0.5, 0.5, 0.5}}; /**< Standard deviation */ + +const ModelInputParams defaultModelInputParams = {1, /**< Max batch size supported */ + ImageType::RGB_U8}; /**< Input image type the network is trained with */ + +const ModelInferenceParams defaultInferenceParams = { + "models/ess.engine", /**< Engine file path */ + {"input_left", "input_right"}, /**< Input layer name of the model */ + {"output_left", "output_conf"}}; /**< Output layer name of the network */ + +const ESSPreProcessorParams defaultESSPreProcessorParams = { + PreProcessType::RESIZE}; // Resize the input image to the network input dimensions + +const int InferencerVersion = NV_TENSORRT_VERSION; + +struct ESS::ESSImpl { + TensorRTInferenceParams tensorrtParams; + InferenceBackendClient client; + + // Model includes 2 input layers and 1 output layer + Tensor_NHWC_C1_F32 m_outputDevice; + Tensor_NHWC_C1_F32 m_confMap; + Tensor_NCHW_C3_F32 m_inputLeftPlanar; + Tensor_NCHW_C3_F32 m_inputRightPlanar; + + // Preprocess and PostProcess Objects + std::unique_ptr m_preprocess; + std::unique_ptr m_postprocess; + + // Max batch Size supported + size_t m_maxBatchSize; + + std::string m_leftInputLayerName, m_rightInputLayerName; + std::string m_confMapName; + + size_t m_networkInputWidth, m_networkInputHeight; + size_t m_networkOutputWidth, m_networkOutputHeight; + + ESSImpl(const ImagePreProcessingParams & imgParams, + const ModelInputParams & modelInputParams, + const ModelBuildParams & modelBuildParams, + const ModelInferenceParams & modelInferParams, + const ESSPreProcessorParams & essParams) + : m_maxBatchSize(modelInputParams.maxBatchSize) { + if (modelInferParams.inputLayers.size() != 2 || modelInferParams.outputLayers.size() != 2 || + modelInputParams.maxBatchSize <= 0) { + throw ErrorCode::INVALID_ARGUMENT; + } + + cvcore::inferencer::OnnxModelBuildFlag buildFlags = + cvcore::inferencer::OnnxModelBuildFlag::NONE; + if (modelBuildParams.enable_fp16) { + buildFlags = cvcore::inferencer::OnnxModelBuildFlag::kFP16; + } + // Initialize TRT backend + tensorrtParams = {TRTInferenceType::TRT_ENGINE, + nullptr, + modelBuildParams.onnx_file_path, + modelInferParams.engineFilePath, + modelBuildParams.force_engine_update, + buildFlags, + modelBuildParams.max_workspace_size, + modelInputParams.maxBatchSize, + modelInferParams.inputLayers, + modelInferParams.outputLayers, 
+ modelBuildParams.dla_core}; + + std::error_code err = + InferenceBackendFactory::CreateTensorRTInferenceBackendClient(client, tensorrtParams); + + if (err != cvcore::tensor_ops::make_error_code(ErrorCode::SUCCESS)) { + throw err; + } + + cvcore::inferencer::ModelMetaData modelInfo = client->getModelMetaData(); + if (modelInfo.maxBatchSize != modelInputParams.maxBatchSize) { + throw ErrorCode::INVALID_ARGUMENT; + } + m_leftInputLayerName = modelInferParams.inputLayers[0]; + m_rightInputLayerName = modelInferParams.inputLayers[1]; + m_confMapName = modelInferParams.outputLayers[1]; + + m_networkInputHeight = modelInfo.inputLayers[m_leftInputLayerName].shape[2]; + m_networkInputWidth = modelInfo.inputLayers[m_leftInputLayerName].shape[3]; + m_inputLeftPlanar = {m_networkInputWidth, m_networkInputHeight, + modelInputParams.maxBatchSize, false}; + m_inputRightPlanar = {m_networkInputWidth, m_networkInputHeight, + modelInputParams.maxBatchSize, false}; + + m_networkOutputHeight = modelInfo.outputLayers[modelInferParams.outputLayers[0]].shape[1]; + m_networkOutputWidth = modelInfo.outputLayers[modelInferParams.outputLayers[0]].shape[2]; + m_outputDevice = {m_networkOutputWidth, m_networkOutputHeight, + modelInputParams.maxBatchSize, false}; + m_confMap = {m_networkOutputWidth, m_networkOutputHeight, + modelInputParams.maxBatchSize, false}; + + CHECK_ERROR_CODE(client->setInput(m_inputLeftPlanar, modelInferParams.inputLayers[0])); + CHECK_ERROR_CODE(client->setInput(m_inputRightPlanar, modelInferParams.inputLayers[1])); + CHECK_ERROR_CODE(client->setOutput(m_outputDevice, modelInferParams.outputLayers[0])); + + // Initialize Preprocessor and postprocessor + m_preprocess.reset(new ESSPreProcessor(imgParams, modelInputParams, + m_networkInputWidth, m_networkInputHeight, essParams)); + m_postprocess.reset(new ESSPostProcessor(modelInputParams, + m_networkOutputWidth, m_networkOutputHeight)); + } + + ~ESSImpl() { + CHECK_ERROR_CODE(client->unregister()); + InferenceBackendFactory::DestroyTensorRTInferenceBackendClient(client); + } + + size_t getModelOutputHeight() { + return m_networkOutputHeight; + } + + size_t getModelOutputWidth() { + return m_networkOutputWidth; + } + + void execute(Tensor_NHWC_C1_F32 & output, Tensor_NHWC_C1_F32 & confMap, + const Tensor_NHWC_C3_U8 & inputLeft, const Tensor_NHWC_C3_U8 & inputRight, + cudaStream_t stream) { + size_t batchSize = inputLeft.getDepth(); + if (inputLeft.isCPU() || inputRight.isCPU()) { + throw std::invalid_argument("ESS : Input type should be GPU buffer"); + } + + if (inputLeft.getDepth() > m_maxBatchSize || inputRight.getDepth() > m_maxBatchSize) { + throw std::invalid_argument("ESS : Input batch size cannot exceed max batch size\n"); + } + + if (inputLeft.getDepth() != inputRight.getDepth() || + output.getDepth() != inputLeft.getDepth()) { + throw std::invalid_argument("ESS : Batch size of input and output images don't" + "match!\n"); + } + m_preprocess->execute(m_inputLeftPlanar, m_inputRightPlanar, inputLeft, + inputRight, stream); + + CHECK_ERROR_CODE(client->setOutput(confMap, m_confMapName)); + CHECK_ERROR_CODE(client->setCudaStream(stream)); + CHECK_ERROR_CODE(client->infer(batchSize)); + // PostProcess + m_postprocess->execute(output, m_outputDevice, stream); + } + + void execute(Tensor_NHWC_C1_F32 & output, Tensor_NHWC_C1_F32 & confMap, + const Tensor_NCHW_C3_F32 & inputLeft, const Tensor_NCHW_C3_F32 & inputRight, + cudaStream_t stream) { + size_t batchSize = inputLeft.getDepth(); + if (inputLeft.isCPU() || inputRight.isCPU()) { + throw 
std::invalid_argument("ESS : Input type should be GPU buffer"); + } + + if (inputLeft.getDepth() > m_maxBatchSize || inputRight.getDepth() > m_maxBatchSize) { + throw std::invalid_argument("ESS : Input batch size cannot exceed max batch size\n"); + } + + if (inputLeft.getDepth() != inputRight.getDepth() || + output.getDepth() != inputLeft.getDepth()) { + throw std::invalid_argument("ESS : Batch size of input and output images don't" + "match!\n"); + } + + if (inputLeft.getWidth() != m_networkInputWidth || + inputLeft.getHeight() != m_networkInputHeight) { + throw std::invalid_argument("ESS : Left preprocessed input does not match network" + "input dimensions!\n"); + } + + if (inputRight.getWidth() != m_networkInputWidth || + inputRight.getHeight() != m_networkInputHeight) { + throw std::invalid_argument("ESS : Right preprocessed input does not match network" + "input dimensions!\n"); + } + + // inference + CHECK_ERROR_CODE(client->setInput(inputLeft, m_leftInputLayerName)); + CHECK_ERROR_CODE(client->setInput(inputRight, m_rightInputLayerName)); + CHECK_ERROR_CODE(client->setOutput(confMap, m_confMapName)); + CHECK_ERROR_CODE(client->setCudaStream(stream)); + CHECK_ERROR_CODE(client->infer(batchSize)); + + // PostProcess + m_postprocess->execute(output, m_outputDevice, stream); + } +}; + +ESS::ESS(const ImagePreProcessingParams & imgParams, + const ModelInputParams & modelInputParams, + const ModelBuildParams & modelBuildParams, + const ModelInferenceParams & modelInferParams, + const ESSPreProcessorParams & essParams) + : m_pImpl(new ESSImpl(imgParams, modelInputParams, + modelBuildParams, modelInferParams, essParams)) {} + +ESS::~ESS() {} + +void ESS::execute(Tensor_NHWC_C1_F32 & output, Tensor_NHWC_C1_F32 & confMap, + const Tensor_NCHW_C3_F32 & inputLeft, const Tensor_NCHW_C3_F32 & inputRight, + cudaStream_t stream) { + m_pImpl->execute(output, confMap, inputLeft, inputRight, stream); +} + +void ESS::execute(Tensor_NHWC_C1_F32 & output, Tensor_NHWC_C1_F32 & confMap, + const Tensor_NHWC_C3_U8 & inputLeft, const Tensor_NHWC_C3_U8 & inputRight, + cudaStream_t stream) { + m_pImpl->execute(output, confMap, inputLeft, inputRight, stream); +} + +size_t ESS::getModelOutputHeight() { + return m_pImpl->getModelOutputHeight(); +} + +size_t ESS::getModelOutputWidth() { + return m_pImpl->getModelOutputWidth(); +} + +} // namespace ess +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/ess/inference/ESS.h b/isaac_ros_ess/gxf/ess/extensions/ess/inference/ESS.h new file mode 100644 index 0000000..acd67d5 --- /dev/null +++ b/isaac_ros_ess/gxf/ess/extensions/ess/inference/ESS.h @@ -0,0 +1,276 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
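// The default preprocessing constants above (pixelMean = -128, normalization =
// 1/255, stdDev = 0.5) combine into a single per-channel scale of
// normalization / stdDev = 2/255 inside the preprocessor. Assuming the normalize
// op computes (pixel + pixelMean) * scale, an 8-bit pixel in [0, 255] is mapped to
// roughly [-1, 1]. A quick stand-alone check of that arithmetic:
#include <cstdio>
#include <initializer_list>

int main() {
  const float pixel_mean = -128.0f;
  const float scale = (1.0f / 255.0f) / 0.5f;  // normalization / stdDev
  for (float p : {0.0f, 128.0f, 255.0f}) {
    // Prints roughly -1.004, 0.000, +0.996 for the three sample pixel values.
    std::printf("p = %5.1f -> %+.4f\n", p, (p + pixel_mean) * scale);
  }
  return 0;
}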
+// +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include +#include +#include + +#include "extensions/tensorops/core/Array.h" +#include "extensions/tensorops/core/Core.h" +#include "extensions/tensorops/core/Image.h" +#include "extensions/tensorops/core/Tensor.h" + +namespace nvidia { +namespace isaac { +namespace ess { + +using TensorLayout = cvcore::tensor_ops::TensorLayout; +using ChannelType = cvcore::tensor_ops::ChannelType; +using ChannelCount = cvcore::tensor_ops::ChannelCount; +using TensorDimension = cvcore::tensor_ops::TensorDimension; +using Tensor_NHWC_C3_U8 = cvcore::tensor_ops::Tensor; +using Tensor_NHWC_C3_F32 = cvcore::tensor_ops::Tensor; +using Tensor_NHWC_C1_F32 = cvcore::tensor_ops::Tensor; +using Tensor_NCHW_C3_F32 = cvcore::tensor_ops::Tensor; +using Tensor_HWC_C3_U8 = cvcore::tensor_ops::Tensor; +using DisparityLevels = cvcore::tensor_ops::Array; +using TensorBase = cvcore::tensor_ops::TensorBase; +using ImagePreProcessingParams = cvcore::tensor_ops::ImagePreProcessingParams; +using ImageType = cvcore::tensor_ops::ImageType; + +/** + * Struct to describe input type required by the model + */ +struct ModelInputParams { + size_t maxBatchSize; /**< maxbatchSize supported by network*/ + cvcore::tensor_ops::ImageType modelInputType; /**< Input Layout type */ +}; + +/** + * Struct to describe parameters used for building engine file + */ +struct ModelBuildParams { + bool force_engine_update; + std::string onnx_file_path; + bool enable_fp16; + int64_t max_workspace_size; + int64_t dla_core; +}; + +/** + * Struct to describe the model + */ +struct ModelInferenceParams { + std::string engineFilePath; /**< Engine file path. */ + std::vector inputLayers; /**< names of input layers. */ + std::vector outputLayers; /**< names of output layers. */ +}; + +/** + * Describes the algorithm supported for ESS Preprocessing + */ +enum class PreProcessType : uint8_t { + RESIZE = 0, // Resize to network dimensions without maintaining aspect ratio + CENTER_CROP // Crop to network dimensions from center of image +}; + +/** + * Describes the parameters for ESS Preprocessing + */ +struct ESSPreProcessorParams { + /* Preprocessing steps for ESS */ + PreProcessType preProcessType; +}; + +/** + * Default parameters for the preprocessing pipeline. + */ +CVCORE_API extern const ImagePreProcessingParams defaultPreProcessorParams; + +/** + * Default parameters to describe the input expected for the model. + */ +CVCORE_API extern const ModelInputParams defaultModelInputParams; + +/** + * Default parameters to describe the model inference parameters. + */ +CVCORE_API extern const ModelInferenceParams defaultInferenceParams; + +/** + * Default parameters for the ESS Preprocessing + */ +CVCORE_API extern const ESSPreProcessorParams defaultESSPreProcessorParams; + +/** + * Version of inference engine used + */ +CVCORE_API extern const int InferencerVersion; + +/* + * Interface for running pre-processing on ESS model. + */ +class CVCORE_API ESSPreProcessor { + public: + /** + * Default constructor is deleted + */ + ESSPreProcessor() = delete; + + /** + * Constructor of ESSPreProcessor. + * @param preProcessorParams image pre-processing parameters. + * @param modelInputParams model paramters for network. + * @param output_width PreProcessor output width. + * @param output_height PreProcessor output height. + * @param essPreProcessorParams paramaters specific for ess preprocessing. 
+ */ + ESSPreProcessor(const ImagePreProcessingParams & preProcessorParams, + const ModelInputParams & modelInputParams, + size_t output_width, size_t output_height, + const ESSPreProcessorParams & essPreProcessorParams); + + /** + * Destructor of ESSPreProcessor. + */ + ~ESSPreProcessor(); + + /** + * Main interface to run pre-processing. + * @param stream cuda stream. + */ + void execute(Tensor_NCHW_C3_F32& leftOutput, Tensor_NCHW_C3_F32& rightOutput, + const Tensor_NHWC_C3_U8& leftInput, const Tensor_NHWC_C3_U8& rightInput, + cudaStream_t stream = 0); + + private: + /** + * Implementation of ESSPreProcessor. + */ + struct ESSPreProcessorImpl; + std::unique_ptr m_pImpl; +}; + +/** + * ESS parameters and implementation + */ +class CVCORE_API ESS { + public: + /** + * Constructor for ESS. + * @param imgparams image pre-processing parameters. + * @param modelInputParams model paramters for network. + * @param modelBuildParams model parameters for building engine. + * @param modelInferParams model input inference parameters. + * @param essPreProcessorParams paramaters specific for ess preprocessing. + */ + ESS(const ImagePreProcessingParams & imgparams, + const ModelInputParams & modelInputParams, + const ModelBuildParams & modelBuildParams, + const ModelInferenceParams & modelInferParams, + const ESSPreProcessorParams & essPreProcessorParams); + + /** + * Default constructor not supported. + */ + ESS() = delete; + + /** + * Destructor for ESS. + */ + ~ESS(); + + /** + * Inference function for a given BGR image + * @param disparityMap Disparity map (CPU/GPU tensor supported) + * @param leftInput RGB/BGR Interleaved Left image (Only GPU Input Tensor + * supported) + * @param rightInput RGB/BGR Interleaved Right image (Only GPU Input Tensor + * supported) + * @param stream Cuda stream + */ + void execute(Tensor_NHWC_C1_F32 & disparityMap, Tensor_NHWC_C1_F32 & confMap, + const Tensor_NHWC_C3_U8 & leftInput, const Tensor_NHWC_C3_U8 & rightInput, + cudaStream_t stream = 0); + + /** + * Inference function for a given Preprocessed image + * @param disparityMap Disparity map (CPU/GPU tensor supported) + * @param leftInput RGB Planar Left image resized to network input dimensions (Only GPU Input Tensor + * supported) + * @param rightInput RGB Planar Right image resized to network input dimensions (Only GPU Input Tensor + * supported) + * @param stream Cuda stream + */ + void execute(Tensor_NHWC_C1_F32& disparityMap, Tensor_NHWC_C1_F32 & confMap, + const Tensor_NCHW_C3_F32& leftInput, const Tensor_NCHW_C3_F32& rightInput, + cudaStream_t stream = 0); + + /** + * Helper function to get Model output Height + */ + size_t getModelOutputHeight(); + + /** + * Helper function to get Model output Width + */ + size_t getModelOutputWidth(); + + private: + struct ESSImpl; + std::unique_ptr m_pImpl; +}; + +/** + * ESS parameters and implementation + */ +class CVCORE_API ESSPostProcessor { + public: + /** + * Constructor for ESS. + * @param modelInputParams model parameters for network. + * @param output_width PostProcessor output width. + * @param output_height PostProcessor output height. + */ + ESSPostProcessor(const ModelInputParams & modelParams, + size_t output_width, size_t output_height); + /** + * Default constructor not supported. + */ + ESSPostProcessor() = delete; + + /** + * Destructor for ESS. 
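// Illustrative stand-alone use of the ESS wrapper declared above. The ONNX path,
// stereo image size, and build settings here are placeholder assumptions, not
// values mandated by this header; the default* parameter sets come from ESS.cpp.
#include <cuda_runtime.h>

#include "extensions/ess/inference/ESS.h"

void RunEssOnce() {
  using namespace nvidia::isaac::ess;

  // Build or load a TensorRT engine from an ONNX file (path is a placeholder).
  const ModelBuildParams build_params{/*force_engine_update=*/false, "models/ess.onnx",
                                      /*enable_fp16=*/true, /*max_workspace_size=*/64L << 20,
                                      /*dla_core=*/-1};
  ESS ess(defaultPreProcessorParams, defaultModelInputParams, build_params,
          defaultInferenceParams, defaultESSPreProcessorParams);

  cudaStream_t stream;
  cudaStreamCreate(&stream);

  // Device-side stereo pair and output buffers sized to the network output.
  Tensor_NHWC_C3_U8 left(1920, 1200, 1, /*isCPU=*/false);
  Tensor_NHWC_C3_U8 right(1920, 1200, 1, /*isCPU=*/false);
  Tensor_NHWC_C1_F32 disparity(ess.getModelOutputWidth(), ess.getModelOutputHeight(), 1, false);
  Tensor_NHWC_C1_F32 confidence(ess.getModelOutputWidth(), ess.getModelOutputHeight(), 1, false);

  ess.execute(disparity, confidence, left, right, stream);
  cudaStreamSynchronize(stream);
  cudaStreamDestroy(stream);
}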
+ */
+  ~ESSPostProcessor();
+
+  /**
+   * Main interface to run post-processing.
+   * @param outputdisparityMap Disparity map rescaled to original resolution (CPU/GPU tensor)
+   * @param inputDisparityMap input Disparity map (GPU tensor)
+   * @param stream Cuda stream
+   */
+  void execute(Tensor_NHWC_C1_F32 & outputdisparityMap,
+      const Tensor_NHWC_C1_F32 & inputdisparityMap,
+      cudaStream_t stream = 0);
+
+ private:
+  struct ESSPostProcessorImpl;
+  std::unique_ptr m_pImpl;
+};
+
+} // namespace ess
+} // namespace isaac
+} // namespace nvidia
diff --git a/isaac_ros_ess/gxf/ess/extensions/ess/inference/ESSPostProcess.cpp b/isaac_ros_ess/gxf/ess/extensions/ess/inference/ESSPostProcess.cpp
new file mode 100644
index 0000000..b67c9d9
--- /dev/null
+++ b/isaac_ros_ess/gxf/ess/extensions/ess/inference/ESSPostProcess.cpp
@@ -0,0 +1,118 @@
+// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
+// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// SPDX-License-Identifier: Apache-2.0
+#include
+#include
+
+#include "extensions/ess/inference/ESS.h"
+#include "extensions/tensorops/core/BBoxUtils.h"
+#include "extensions/tensorops/core/CVError.h"
+#include "extensions/tensorops/core/ImageUtils.h"
+#include "extensions/tensorops/core/Memory.h"
+
+namespace nvidia {
+namespace isaac {
+namespace ess {
+
+struct ESSPostProcessor::ESSPostProcessorImpl {
+  ESSPostProcessorImpl(const ModelInputParams & modelParams,
+      const size_t output_width, const size_t output_height)
+      : m_maxBatchSize(modelParams.maxBatchSize)
+      , m_networkWidth(output_width)
+      , m_networkHeight(output_height) {
+    m_scaledDisparityDevice = {m_networkWidth, m_networkHeight, m_maxBatchSize, false};
+    m_outputDisparityDevice = {m_networkWidth, m_networkHeight, m_maxBatchSize, false};
+  }
+
+  void resizeBuffers(std::size_t width, std::size_t height) {
+    if (m_outputDisparityDevice.getWidth() == width &&
+        m_outputDisparityDevice.getHeight() == height) {
+      return;
+    }
+    m_outputDisparityDevice = {width, height, m_maxBatchSize, false};
+  }
+
+  void process(Tensor_NHWC_C1_F32 & outputDisparity, const Tensor_NHWC_C1_F32 & inputDisparity,
+      cudaStream_t stream) {
+    if (inputDisparity.isCPU()) {
+      throw std::invalid_argument("ESSPostProcessor : Input Tensor must be GPU Tensor.");
+    }
+
+    if (inputDisparity.getWidth() != m_networkWidth ||
+        inputDisparity.getHeight() != m_networkHeight) {
+      std::cerr << "input disparity: " << inputDisparity.getWidth() << "x"
+                << inputDisparity.getHeight() << ", network size: "
+                << m_networkWidth << "x" << m_networkHeight << std::endl;
+      throw std::invalid_argument(
+          "ESSPostProcessor : Input Tensor dimension "
+          "does not match network input "
+          "requirement");
+    }
+
+    if (inputDisparity.getDepth() != outputDisparity.getDepth()) {
+      throw std::invalid_argument("ESSPostProcessor : Input/Output Tensor batch size "
+          "mismatch.");
+    }
+
+    const size_t batchSize = inputDisparity.getDepth();
+    if (batchSize > m_maxBatchSize) { 
+ throw std::invalid_argument("ESSPostProcessor : Input batchsize exceeds Max" + "Batch size."); + } + const size_t outputWidth = outputDisparity.getWidth(); + const size_t outputHeight = outputDisparity.getHeight(); + + // Disparity map values are scaled based on the outputWidth/networkInputWidth ratio + const float scale = static_cast(outputWidth) / m_networkWidth; + Tensor_NHWC_C1_F32 scaledDisparity(m_scaledDisparityDevice.getWidth(), + m_scaledDisparityDevice.getHeight(), batchSize, + m_scaledDisparityDevice.getData(), false); + + cvcore::tensor_ops::Normalize(scaledDisparity, inputDisparity, scale, 0, stream); + if (!outputDisparity.isCPU()) { + cvcore::tensor_ops::Resize(outputDisparity, m_scaledDisparityDevice, stream); + } else { + resizeBuffers(outputWidth, outputHeight); + Tensor_NHWC_C1_F32 outputDisparityDevice(m_outputDisparityDevice.getWidth(), + m_outputDisparityDevice.getHeight(), batchSize, + m_outputDisparityDevice.getData(), false); + cvcore::tensor_ops::Resize(outputDisparityDevice, m_scaledDisparityDevice, stream); + cvcore::tensor_ops::Copy(outputDisparity, outputDisparityDevice, stream); + CHECK_ERROR(cudaStreamSynchronize(stream)); + } + } + + size_t m_maxBatchSize; + size_t m_networkWidth, m_networkHeight; + Tensor_NHWC_C1_F32 m_scaledDisparityDevice; + Tensor_NHWC_C1_F32 m_outputDisparityDevice; +}; + +void ESSPostProcessor::execute(Tensor_NHWC_C1_F32 & outputDisparity, + const Tensor_NHWC_C1_F32 & inputDisparity, cudaStream_t stream) { + m_pImpl->process(outputDisparity, inputDisparity, stream); +} + +ESSPostProcessor::ESSPostProcessor(const ModelInputParams & modelInputParams, + size_t output_width, size_t output_height) + : m_pImpl(new ESSPostProcessor::ESSPostProcessorImpl(modelInputParams, + output_width, output_height)) {} + +ESSPostProcessor::~ESSPostProcessor() {} + +} // namespace ess +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/ess/inference/ESSPreProcess.cpp b/isaac_ros_ess/gxf/ess/extensions/ess/inference/ESSPreProcess.cpp new file mode 100644 index 0000000..d250451 --- /dev/null +++ b/isaac_ros_ess/gxf/ess/extensions/ess/inference/ESSPreProcess.cpp @@ -0,0 +1,190 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
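// Disparity is measured in pixels of the image it was estimated on, so when the
// post-processor above resizes the map from the network output width to the
// requested output width it also multiplies every value by the width ratio
// (the Normalize call with scale = outputWidth / m_networkWidth and offset 0).
// A minimal sketch of that value rescaling, with hypothetical sizes:
inline float RescaleDisparity(float disparity_px, float network_width, float output_width) {
  return disparity_px * (output_width / network_width);
}
// e.g. a 24 px disparity at a 960 px wide network output becomes
// RescaleDisparity(24.0f, 960.0f, 1920.0f) == 48.0f when upscaled to 1920 px.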
+// +// SPDX-License-Identifier: Apache-2.0 +#include +#include + +#include "extensions/ess/inference/ESS.h" +#include "extensions/tensorops/core/BBoxUtils.h" +#include "extensions/tensorops/core/CVError.h" +#include "extensions/tensorops/core/Image.h" +#include "extensions/tensorops/core/ImageUtils.h" +#include "extensions/tensorops/core/Memory.h" + +namespace nvidia { +namespace isaac { +namespace ess { +using ImageType = cvcore::tensor_ops::ImageType; +using BBox = cvcore::tensor_ops::BBox; + +struct ESSPreProcessor::ESSPreProcessorImpl { + size_t m_maxBatchSize; + size_t m_outputWidth; + size_t m_outputHeight; + PreProcessType m_processType; + ImagePreProcessingParams m_preProcessorParams; + Tensor_NHWC_C3_U8 m_resizedDeviceLeftInput; + Tensor_NHWC_C3_U8 m_resizedDeviceRightInput; + Tensor_NHWC_C3_F32 m_normalizedDeviceLeftInput; + Tensor_NHWC_C3_F32 m_normalizedDeviceRightInput; + bool m_swapRB; + + ESSPreProcessorImpl(const ImagePreProcessingParams & imgParams, + const ModelInputParams & modelParams, + size_t output_width, size_t output_height, + const ESSPreProcessorParams & essParams) + : m_maxBatchSize(modelParams.maxBatchSize) + , m_outputHeight(output_height) + , m_outputWidth(output_width) + , m_processType(essParams.preProcessType) + , m_preProcessorParams(imgParams) { + if (imgParams.imgType != ImageType::BGR_U8 && + imgParams.imgType != ImageType::RGB_U8) { + throw std::invalid_argument("ESSPreProcessor : Only image types RGB_U8/BGR_U8 are" + "supported\n"); + } + m_resizedDeviceLeftInput = {output_width, output_height, + modelParams.maxBatchSize, false}; + m_resizedDeviceRightInput = {output_width, output_height, + modelParams.maxBatchSize, false}; + m_normalizedDeviceLeftInput = {output_width, output_height, + modelParams.maxBatchSize, false}; + m_normalizedDeviceRightInput = {output_width, output_height, + modelParams.maxBatchSize, false}; + m_swapRB = imgParams.imgType != modelParams.modelInputType; + } + + void process(Tensor_NCHW_C3_F32 & outputLeft, Tensor_NCHW_C3_F32 & outputRight, + const Tensor_NHWC_C3_U8 & inputLeft, const Tensor_NHWC_C3_U8 & inputRight, + cudaStream_t stream) { + if (inputLeft.isCPU() || inputRight.isCPU() || outputLeft.isCPU() || + outputRight.isCPU()) { + throw std::invalid_argument("ESSPreProcessor : Input/Output Tensor must be" + "GPU Tensor."); + } + + if (outputLeft.getWidth() != m_outputWidth || outputLeft.getHeight() != m_outputHeight || + outputRight.getWidth() != m_outputWidth || outputRight.getHeight() != m_outputHeight) { + throw std::invalid_argument( + "ESSPreProcessor : Output Tensor dimension does not match network input" + "requirement"); + } + + if (inputLeft.getWidth() != inputRight.getWidth() || + inputLeft.getHeight() != inputRight.getHeight()) { + throw std::invalid_argument("ESSPreProcessor : Input tensor dimensions don't match"); + } + + if (outputLeft.getDepth() != inputLeft.getDepth() || + outputRight.getDepth() != inputRight.getDepth() || + inputLeft.getDepth() != inputRight.getDepth()) { + throw std::invalid_argument("ESSPreProcessor : Input/Output Tensor batchsize" + "mismatch."); + } + + if (outputLeft.getDepth() > m_maxBatchSize) { + throw std::invalid_argument("ESSPreProcessor : Input/Output batchsize exceeds" + "max batch size."); + } + + const size_t batchSize = inputLeft.getDepth(); + const size_t inputWidth = inputLeft.getWidth(); + const size_t inputHeight = inputLeft.getHeight(); + + if (m_processType == PreProcessType::RESIZE) { + cvcore::tensor_ops::Resize(m_resizedDeviceLeftInput, inputLeft, stream); + 
cvcore::tensor_ops::Resize(m_resizedDeviceRightInput, inputRight, stream); + } else { + const float centerX = static_cast(inputWidth) / 2.0; + const float centerY = static_cast(inputHeight) / 2.0; + const float offsetX = static_cast(m_outputWidth) / 2.0; + const float offsetY = static_cast(m_outputHeight) / 2.0; + BBox srcCrop, dstCrop; + dstCrop = {0, 0, static_cast(m_outputWidth - 1), static_cast( + m_outputHeight - 1)}; + srcCrop.xmin = std::max(0, static_cast(centerX - offsetX)); + srcCrop.ymin = std::max(0, static_cast(centerY - offsetY)); + srcCrop.xmax = std::min(static_cast(m_outputWidth - 1), + static_cast(centerX + offsetX)); + srcCrop.ymax = std::min(static_cast(m_outputHeight - 1), + static_cast(centerY + offsetY)); + for (size_t i = 0; i < batchSize; i++) { + Tensor_HWC_C3_U8 inputLeftCrop( + inputWidth, inputHeight, + const_cast(inputLeft.getData()) + i * + inputLeft.getStride(TensorDimension::DEPTH), + false); + Tensor_HWC_C3_U8 outputLeftCrop( + m_outputWidth, m_outputHeight, + m_resizedDeviceLeftInput.getData() + + i * m_resizedDeviceLeftInput.getStride(TensorDimension::DEPTH), + false); + Tensor_HWC_C3_U8 inputRightCrop( + inputWidth, inputHeight, + const_cast(inputRight.getData()) + + i * inputRight.getStride(TensorDimension::DEPTH), + false); + Tensor_HWC_C3_U8 outputRightCrop(m_outputWidth, m_outputHeight, + m_resizedDeviceRightInput.getData() + + i * m_resizedDeviceRightInput.getStride(TensorDimension::DEPTH), + false); + cvcore::tensor_ops::CropAndResize(outputLeftCrop, inputLeftCrop, dstCrop, srcCrop, + cvcore::tensor_ops::InterpolationType::INTERP_LINEAR, stream); + cvcore::tensor_ops::CropAndResize(outputRightCrop, inputRightCrop, dstCrop, + srcCrop, cvcore::tensor_ops::InterpolationType::INTERP_LINEAR, stream); + } + } + + if (m_swapRB) { + cvcore::tensor_ops::ConvertColorFormat(m_resizedDeviceLeftInput, + m_resizedDeviceLeftInput, cvcore::tensor_ops::BGR2RGB, stream); + cvcore::tensor_ops::ConvertColorFormat(m_resizedDeviceRightInput, + m_resizedDeviceRightInput, cvcore::tensor_ops::BGR2RGB, stream); + } + + float scale[3]; + for (size_t i = 0; i < 3; i++) { + scale[i] = m_preProcessorParams.normalization[i] / m_preProcessorParams.stdDev[i]; + } + + cvcore::tensor_ops::Normalize(m_normalizedDeviceLeftInput, m_resizedDeviceLeftInput, + scale, m_preProcessorParams.pixelMean, stream); + cvcore::tensor_ops::Normalize(m_normalizedDeviceRightInput, m_resizedDeviceRightInput, + scale, m_preProcessorParams.pixelMean, stream); + cvcore::tensor_ops::InterleavedToPlanar(outputLeft, m_normalizedDeviceLeftInput, stream); + cvcore::tensor_ops::InterleavedToPlanar(outputRight, m_normalizedDeviceRightInput, stream); + } +}; + +void ESSPreProcessor::execute(Tensor_NCHW_C3_F32 & outputLeft, + Tensor_NCHW_C3_F32 & outputRight, + const Tensor_NHWC_C3_U8 & inputLeft, const Tensor_NHWC_C3_U8 & inputRight, + cudaStream_t stream) { + m_pImpl->process(outputLeft, outputRight, inputLeft, inputRight, stream); +} + +ESSPreProcessor::ESSPreProcessor(const ImagePreProcessingParams & preProcessorParams, + const ModelInputParams & modelInputParams, + const size_t output_width, const size_t output_height, + const ESSPreProcessorParams & essParams) + : m_pImpl(new ESSPreProcessor::ESSPreProcessorImpl(preProcessorParams, + modelInputParams, output_width, output_height, essParams)) {} + +ESSPreProcessor::~ESSPreProcessor() {} + +} // namespace ess +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/CameraModel.cpp 
b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/CameraModel.cpp deleted file mode 100644 index 5dce98e..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/CameraModel.cpp +++ /dev/null @@ -1,86 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "CameraModel.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -namespace detail { - -gxf::Expected<::cvcore::CameraDistortionType> GetCameraDistortionType(const std::string& type) { - if (type == "Perspective") { - return ::cvcore::CameraDistortionType::NONE; - } else if (type == "Polynomial") { - return ::cvcore::CameraDistortionType::Polynomial; - } else if (type == "FisheyeEquidistant") { - return ::cvcore::CameraDistortionType::FisheyeEquidistant; - } else if (type == "FisheyeEquisolid") { - return ::cvcore::CameraDistortionType::FisheyeEquisolid; - } else if (type == "FisheyeOrthoGraphic") { - return ::cvcore::CameraDistortionType::FisheyeOrthoGraphic; - } else if (type == "FisheyeStereographic") { - return ::cvcore::CameraDistortionType::FisheyeStereographic; - } else { - return gxf::Unexpected{GXF_FAILURE}; - } -} - -} // namespace detail - -gxf_result_t CameraModel::registerInterface(gxf::Registrar* registrar) { - gxf::Expected result; - - result &= registrar->parameter(distortion_type_, "distortion_type"); - result &= registrar->parameter(distortion_coefficients_, "distortion_coefficients"); - result &= registrar->parameter(focal_length_, "focal_length"); - result &= registrar->parameter(principle_point_, "principle_point"); - result &= registrar->parameter(skew_value_, "skew_value"); - - return gxf::ToResultCode(result); -} - -gxf_result_t CameraModel::initialize() { - // Construct distortion model - auto type = detail::GetCameraDistortionType(distortion_type_.get()); - if (!type) { - return GXF_FAILURE; - } - if (distortion_coefficients_.get().size() != 8) { - GXF_LOG_ERROR("size of distortion coefficients must be 8."); - return GXF_FAILURE; - } - for (size_t i = 0; i < 8; i++) { - distortions_.coefficients[i] = distortion_coefficients_.get()[i]; - } - distortions_.type = type.value(); - - // Construct intrinsic model - if (focal_length_.get().size() != 2 || principle_point_.get().size() != 2) { - GXF_LOG_ERROR("focal length and principle point must be 2-element array."); - return GXF_FAILURE; - } - intrinsics_ = ::cvcore::CameraIntrinsics(focal_length_.get()[0], focal_length_.get()[1], principle_point_.get()[0], - principle_point_.get()[1], skew_value_.get()); - - return GXF_SUCCESS; -} - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/CameraModel.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/CameraModel.hpp deleted file mode 100644 index e68c67f..0000000 --- 
a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/CameraModel.hpp +++ /dev/null @@ -1,60 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_CAMERA_MODEL_HPP -#define NVIDIA_CVCORE_CAMERA_MODEL_HPP - -#include "gxf/core/component.hpp" -#include "gxf/std/parameter_parser_std.hpp" - -#include "cv/core/CameraModel.h" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// Wrapper of CameraModel compatible with CVCORE -class CameraModel : public gxf::Component { -public: - virtual ~CameraModel() = default; - CameraModel() = default; - - gxf_result_t registerInterface(gxf::Registrar* registrar) override; - gxf_result_t initialize() override; - - ::cvcore::CameraDistortionModel getDistortionModel() const { - return distortions_; - } - ::cvcore::CameraIntrinsics getCameraIntrinsics() const { - return intrinsics_; - } - -private: - gxf::Parameter distortion_type_; - gxf::Parameter> distortion_coefficients_; - gxf::Parameter> focal_length_; - gxf::Parameter> principle_point_; - gxf::Parameter skew_value_; - - ::cvcore::CameraDistortionModel distortions_; - ::cvcore::CameraIntrinsics intrinsics_; -}; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ConvertColorFormat.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ConvertColorFormat.cpp deleted file mode 100644 index 074e013..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ConvertColorFormat.cpp +++ /dev/null @@ -1,214 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 -#include "ConvertColorFormat.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -namespace detail { - -template<::cvcore::ImageType T_IN, ::cvcore::ImageType T_OUT> -gxf_result_t ConvertColorFormatImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info, - const ImageInfo& input_info, const char* output_name, const char* input_name, - gxf::Handle output_adapter, gxf::Handle input_adapter, - gxf::Handle allocator, - ::cvcore::tensor_ops::ColorConversionType type, cudaStream_t stream) { - auto input_image = input_adapter->WrapImageFromMessage(input, input_name); - if (!input_image) { - return GXF_FAILURE; - } - - auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator, - output_info.is_cpu, output_name); - if (error != GXF_SUCCESS) { - return GXF_FAILURE; - } - - auto output_image = output_adapter->WrapImageFromMessage(output, output_name); - if (!output_image) { - return GXF_FAILURE; - } - - ::cvcore::tensor_ops::ConvertColorFormat(output_image.value(), input_image.value(), type, stream); - return GXF_SUCCESS; -} - -template<::cvcore::ImageType T_IN, ::cvcore::ImageType T_OUT> -gxf_result_t ConvertColorFormatStreamImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info, - const ImageInfo& input_info, const char* output_name, const char* input_name, - gxf::Handle stream, gxf::Handle output_adapter, - gxf::Handle input_adapter, - gxf::Handle allocator) { - auto input_image = input_adapter->WrapImageFromMessage(input, input_name); - if (!input_image) { - return GXF_FAILURE; - } - - auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator, - output_info.is_cpu, output_name); - if (error != GXF_SUCCESS) { - return GXF_FAILURE; - } - - auto output_image = output_adapter->WrapImageFromMessage(output, output_name); - if (!output_image) { - return GXF_FAILURE; - } - - auto err_code = stream->getStream()->ColorConvert(output_image.value(), input_image.value()); - if (err_code != ::cvcore::make_error_condition(::cvcore::ErrorCode::SUCCESS)) { - GXF_LOG_ERROR("color conversion operation failed."); - return GXF_FAILURE; - } - - return GXF_SUCCESS; -} - -} // namespace detail - -template -gxf_result_t ConvertColorFormatBase::registerInterface(gxf::Registrar* registrar) { - gxf::Expected result; - - result &= registrar->parameter(output_type_, "output_type"); - result &= registrar->parameter(receiver_, "receiver"); - result &= registrar->parameter(transmitter_, "transmitter"); - result &= registrar->parameter(pool_, "pool"); - result &= registrar->parameter(stream_, "stream", "tensor stream", "tensor stream object", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(stream_pool_, "stream_pool", "cuda stream pool", "cuda stream pool object", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(input_adapter_, "input_adapter"); - result &= registrar->parameter(output_adapter_, "output_adapter"); - result &= registrar->parameter(input_name_, "input_name", "input name", "input tensor name", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(output_name_, "output_name", "output name", "output tensor name", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - - return gxf::ToResultCode(result); -} - -template -gxf::Expected 
ConvertColorFormatBase::doInferOutputInfo(gxf::Entity& input) { - // Set output type - auto output_type = GetImageTypeFromString(output_type_); - if (!output_type) { - return gxf::Unexpected{GXF_FAILURE}; - } - // Check if no-op is needed - no_op_ = output_type.value() == input_info_.type; - return ImageInfo{output_type.value(), input_info_.width, input_info_.height, input_info_.is_cpu}; -} - -template -gxf_result_t ConvertColorFormatBase::doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) { - *output = *input; - return GXF_SUCCESS; -} - -#define DEFINE_CONVERT_COLOR_FORMAT(INPUT_TYPE, OUTPUT_TYPE, CONVERSION_TYPE) \ - if (input_info_.type == INPUT_TYPE && output_info_.type == OUTPUT_TYPE) { \ - return detail::ConvertColorFormatImpl( \ - output, input, output_info_, input_info_, output_name, input_name, output_adapter_.get(), input_adapter_.get(), \ - pool_.get(), CONVERSION_TYPE, stream); \ - } - -#define DEFINE_STREAM_CONVERT_COLOR_FORMAT(INPUT_TYPE, OUTPUT_TYPE) \ - if (input_info_.type == INPUT_TYPE && output_info_.type == OUTPUT_TYPE) { \ - return detail::ConvertColorFormatStreamImpl( \ - output, input, output_info_, input_info_, output_name, input_name, stream_.try_get().value(), \ - output_adapter_.get(), input_adapter_.get(), pool_.get()); \ - } - -template<> -gxf_result_t ConvertColorFormatBase::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, - const char* output_name, const char* input_name) { - GXF_LOG_INFO("execute convert color format"); - - // Run the color conversion operation - DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::BGR_U8); - DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::RGB_U8); - DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::NV12, ::cvcore::ImageType::BGR_U8); - DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::NV12); - DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::NV12, ::cvcore::ImageType::RGB_U8); - DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::NV12); - DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::NV24, ::cvcore::ImageType::BGR_U8); - DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::NV24); - DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::NV24, ::cvcore::ImageType::RGB_U8); - DEFINE_STREAM_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::NV24); - - // Return error code for unsupported type - GXF_LOG_ERROR("invalid input/output type for image color conversion."); - return GXF_FAILURE; -} - -template<> -gxf_result_t ConvertColorFormatBase::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, - const char* output_name, const char* input_name) { - GXF_LOG_INFO("execute convert color format"); - - // Run the color conversion operation - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::BGR_U8, - ::cvcore::tensor_ops::ColorConversionType::RGB2BGR); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U16, ::cvcore::ImageType::BGR_U16, - ::cvcore::tensor_ops::ColorConversionType::RGB2BGR); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_F32, ::cvcore::ImageType::BGR_F32, - ::cvcore::tensor_ops::ColorConversionType::RGB2BGR); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::RGB_U8, - ::cvcore::tensor_ops::ColorConversionType::BGR2RGB); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U16, 
::cvcore::ImageType::RGB_U16, - ::cvcore::tensor_ops::ColorConversionType::BGR2RGB); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_F32, ::cvcore::ImageType::RGB_F32, - ::cvcore::tensor_ops::ColorConversionType::BGR2RGB); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::Y_U8, - ::cvcore::tensor_ops::ColorConversionType::RGB2GRAY); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_U16, ::cvcore::ImageType::Y_U16, - ::cvcore::tensor_ops::ColorConversionType::RGB2GRAY); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::RGB_F32, ::cvcore::ImageType::Y_F32, - ::cvcore::tensor_ops::ColorConversionType::RGB2GRAY); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::Y_U8, - ::cvcore::tensor_ops::ColorConversionType::BGR2GRAY); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_U16, ::cvcore::ImageType::Y_U16, - ::cvcore::tensor_ops::ColorConversionType::BGR2GRAY); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::BGR_F32, ::cvcore::ImageType::Y_F32, - ::cvcore::tensor_ops::ColorConversionType::BGR2GRAY); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::Y_U8, ::cvcore::ImageType::RGB_U8, - ::cvcore::tensor_ops::ColorConversionType::GRAY2RGB); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::Y_U16, ::cvcore::ImageType::RGB_U16, - ::cvcore::tensor_ops::ColorConversionType::GRAY2RGB); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::Y_F32, ::cvcore::ImageType::RGB_F32, - ::cvcore::tensor_ops::ColorConversionType::GRAY2RGB); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::Y_U8, ::cvcore::ImageType::BGR_U8, - ::cvcore::tensor_ops::ColorConversionType::GRAY2BGR); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::Y_U16, ::cvcore::ImageType::BGR_U16, - ::cvcore::tensor_ops::ColorConversionType::GRAY2BGR); - DEFINE_CONVERT_COLOR_FORMAT(::cvcore::ImageType::Y_F32, ::cvcore::ImageType::BGR_F32, - ::cvcore::tensor_ops::ColorConversionType::GRAY2BGR); - - // Return error code for unsupported type - GXF_LOG_ERROR("invalid input/output type for image color conversion."); - return GXF_FAILURE; -} - -template class ConvertColorFormatBase; -template class ConvertColorFormatBase; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ConvertColorFormat.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ConvertColorFormat.hpp deleted file mode 100644 index f78786b..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ConvertColorFormat.hpp +++ /dev/null @@ -1,51 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_CONVERT_COLOR_FORMAT_HPP -#define NVIDIA_CVCORE_CONVERT_COLOR_FORMAT_HPP - -#include "TensorOperator.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// ConvertColorFormat operator. 
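Both removed doExecute specializations above fan out over the supported (input, output) image-type pairs with the DEFINE_CONVERT_COLOR_FORMAT / DEFINE_STREAM_CONVERT_COLOR_FORMAT macros: each expansion is an if-check that runs the matching template instantiation and returns its result, and falling through every check is the unsupported-format error path. The standalone sketch below shows the same dispatch pattern; ImageType, Convert and DoExecute here are illustrative placeholders, not the original cvcore/GXF symbols.

// Sketch of the macro dispatch used by the removed doExecute() bodies.
#include <cstdio>

enum class ImageType { RGB_U8, BGR_U8, Y_U8 };

// Placeholder "kernel": one template instantiation per supported type pair.
template <ImageType In, ImageType Out>
int Convert() {
  std::printf("converting %d -> %d\n", static_cast<int>(In), static_cast<int>(Out));
  return 0;  // stands in for GXF_SUCCESS
}

// Expands to an if-check: run the matching instantiation and return early.
#define DEFINE_CONVERT(IN, OUT)           \
  if (input == (IN) && output == (OUT)) { \
    return Convert<IN, OUT>();            \
  }

int DoExecute(ImageType input, ImageType output) {
  DEFINE_CONVERT(ImageType::RGB_U8, ImageType::BGR_U8);
  DEFINE_CONVERT(ImageType::BGR_U8, ImageType::RGB_U8);
  // Falling through every check is the unsupported-format error path.
  std::printf("invalid input/output type for image color conversion.\n");
  return 1;  // stands in for GXF_FAILURE
}

int main() {
  DoExecute(ImageType::RGB_U8, ImageType::BGR_U8);  // matches the first check
  DoExecute(ImageType::Y_U8, ImageType::RGB_U8);    // falls through, reports the error
}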
-template -class ConvertColorFormatBase : public TensorOperator { -public: - virtual ~ConvertColorFormatBase() {} - - gxf_result_t registerInterface(gxf::Registrar* registrar) override final; - -private: - gxf::Expected doInferOutputInfo(gxf::Entity& input) override final; - gxf_result_t doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) override final; - gxf_result_t doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, const char* output_name, - const char* input_name) override final; - - gxf::Parameter output_type_; -}; - -class ConvertColorFormat : public ConvertColorFormatBase {}; -class StreamConvertColorFormat : public ConvertColorFormatBase {}; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/CropAndResize.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/CropAndResize.cpp deleted file mode 100644 index d0c2872..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/CropAndResize.cpp +++ /dev/null @@ -1,161 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#include "CropAndResize.hpp" -#include "Resize.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -namespace detail { - -template<::cvcore::ImageType T> -gxf_result_t CropAndResizeImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info, - const ImageInfo& input_info, const char* output_name, const char* input_name, - gxf::Handle output_adapter, gxf::Handle input_adapter, - gxf::Handle allocator, const std::vector<::cvcore::BBox>& src_rois, - ::cvcore::tensor_ops::InterpolationType interp_type, cudaStream_t stream) { - auto input_image = input_adapter->WrapImageFromMessage(input, input_name); - if (!input_image) { - return GXF_FAILURE; - } - - const size_t num_output = src_rois.size(); - - for (size_t i = 0; i < num_output; i++) { - const std::string output_name_i = std::string(output_name) + "_" + std::to_string(i); - auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator, - output_info.is_cpu, output_name_i.c_str()); - if (error != GXF_SUCCESS) { - return GXF_FAILURE; - } - auto output_image = output_adapter->WrapImageFromMessage(output, output_name_i.c_str()); - if (!output_image) { - return GXF_FAILURE; - } - ::cvcore::tensor_ops::CropAndResize(output_image.value(), input_image.value(), src_rois[i], interp_type, stream); - } - - return GXF_SUCCESS; -} - -} // namespace detail - -gxf_result_t CropAndResize::registerInterface(gxf::Registrar* registrar) { - gxf::Expected result; - - result &= registrar->parameter(output_width_, "output_width"); - result &= registrar->parameter(output_height_, "output_height"); - result &= registrar->parameter(interp_type_, "interp_type"); - result &= 
registrar->parameter(keep_aspect_ratio_, "keep_aspect_ratio"); - result &= registrar->parameter(receiver_bbox_, "receiver_bbox"); - result &= registrar->parameter(receiver_, "receiver"); - result &= registrar->parameter(transmitter_, "transmitter"); - result &= registrar->parameter(pool_, "pool"); - result &= registrar->parameter(stream_pool_, "stream_pool", "cuda stream pool", "cuda stream pool object", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(input_adapter_, "input_adapter"); - result &= registrar->parameter(output_adapter_, "output_adapter"); - result &= registrar->parameter(input_name_, "input_name", "input name", "input tensor name", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(output_name_, "output_name", "output name", "output tensor name", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - - return gxf::ToResultCode(result); -} - -gxf::Expected CropAndResize::doInferOutputInfo(gxf::Entity& input) { - // Set crop regions - auto input_bbox_message = receiver_bbox_->receive(); - if (!input_bbox_message) { - return gxf::Unexpected{GXF_FAILURE}; - } - auto bbox_tensor = input_bbox_message.value().get(); - if (!bbox_tensor) { - return gxf::Unexpected{GXF_FAILURE}; - } - const gxf::Shape bbox_shape = bbox_tensor.value()->shape(); - if (bbox_shape.rank() != 2 || bbox_shape.dimension(1) != 4) { - GXF_LOG_ERROR("invalid input bbox dimension."); - return gxf::Unexpected{GXF_FAILURE}; - } - const size_t num_bbox = bbox_shape.dimension(0); - auto bbox_pointer = bbox_tensor.value()->data(); - if (!bbox_pointer) { - GXF_LOG_ERROR("empty bbox input."); - return gxf::Unexpected{GXF_FAILURE}; - } - std::vector<::cvcore::BBox> rois; - for (size_t i = 0; i < num_bbox; i++) { - const int index = i * 4; - rois.push_back({bbox_pointer.value()[index], bbox_pointer.value()[index + 1], bbox_pointer.value()[index + 2], - bbox_pointer.value()[index + 3]}); - } - input_rois_ = std::move(rois); - // Check if no-op is needed - no_op_ = input_rois_.size() == 1 && input_rois_[0].xmin == 0 && - input_rois_[0].xmax == static_cast(input_info_.width) && input_rois_[0].ymin == 0 && - input_rois_[0].ymax == static_cast(input_info_.height); - - return ImageInfo{input_info_.type, output_width_.get(), output_height_.get(), input_info_.is_cpu}; -} - -gxf_result_t CropAndResize::doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) { - auto crop_result = GetCroppedCameraModel(*input, input_rois_[0]); - if (!crop_result) { - return GXF_FAILURE; - } - *output = GetScaledCameraModel(crop_result.value(), output_info_.width, output_info_.height, false).value(); - return GXF_SUCCESS; -} - -#define DEFINE_CROP_AND_RESIZE(INPUT_TYPE) \ - if (input_info_.type == INPUT_TYPE) { \ - return detail::CropAndResizeImpl(output, input, output_info_, input_info_, output_name, input_name, \ - output_adapter_.get(), input_adapter_.get(), pool_.get(), \ - input_rois_, interp.value(), stream); \ - } - -gxf_result_t CropAndResize::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, - const char* output_name, const char* input_name) { - GXF_LOG_INFO("execute crop_and_resize."); - // Check if interpolation type is valid - auto interp = GetInterpolationType(interp_type_); - if (!interp) { - return interp.error(); - } - - // Run the image resizing operation - DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::Y_U8); - DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::Y_U16); - 
DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::Y_F32); - DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::RGB_U8); - DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::RGB_U16); - DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::RGB_F32); - DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::BGR_U8); - DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::BGR_U16); - DEFINE_CROP_AND_RESIZE(::cvcore::ImageType::BGR_F32); - - // Return error code for unsupported type - GXF_LOG_ERROR("invalid input/output type for image crop_and_resize."); - return GXF_FAILURE; -} - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/CropAndResize.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/CropAndResize.hpp deleted file mode 100644 index 2bde6fb..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/CropAndResize.hpp +++ /dev/null @@ -1,53 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_CROP_AND_RESIZE_HPP -#define NVIDIA_CVCORE_CROP_AND_RESIZE_HPP - -#include "TensorOperator.hpp" -#include "cv/core/BBox.h" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// CropAndResize operator. -class CropAndResize : public TensorOperator { -public: - virtual ~CropAndResize() {} - - gxf_result_t registerInterface(gxf::Registrar* registrar) override; - -private: - gxf::Expected doInferOutputInfo(gxf::Entity& input) override final; - gxf_result_t doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) override final; - gxf_result_t doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, const char* output_name, - const char* input_name) override final; - - gxf::Parameter output_width_; - gxf::Parameter output_height_; - gxf::Parameter interp_type_; - gxf::Parameter keep_aspect_ratio_; - gxf::Parameter> receiver_bbox_; - std::vector<::cvcore::BBox> input_rois_; -}; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Frame3D.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Frame3D.cpp deleted file mode 100644 index c28829f..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Frame3D.cpp +++ /dev/null @@ -1,56 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "Frame3D.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -gxf_result_t Frame3D::registerInterface(gxf::Registrar* registrar) { - gxf::Expected result; - - result &= registrar->parameter(rotation_, "rotation"); - result &= registrar->parameter(translation_, "translation"); - - return gxf::ToResultCode(result); -} - -gxf_result_t Frame3D::initialize() { - // Construct extrinsic model - if (rotation_.get().size() != 9) { - GXF_LOG_ERROR("size of rotation matrix must be 9"); - return GXF_FAILURE; - } - if (translation_.get().size() != 3) { - GXF_LOG_ERROR("size of translation vector must be 3"); - return GXF_FAILURE; - } - float raw_matrix[3][4]; - for (size_t i = 0; i < 9; i++) { - raw_matrix[i / 3][i % 3] = rotation_.get()[i]; - } - for (size_t i = 0; i < 3; i++) { - raw_matrix[i][3] = translation_.get()[i]; - } - extrinsics_ = ::cvcore::CameraExtrinsics(raw_matrix); - return GXF_SUCCESS; -} - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Frame3D.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Frame3D.hpp deleted file mode 100644 index aec8cf5..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Frame3D.hpp +++ /dev/null @@ -1,53 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_FRAME3D_HPP -#define NVIDIA_CVCORE_FRAME3D_HPP - -#include "gxf/core/component.hpp" -#include "gxf/std/parameter_parser_std.hpp" - -#include "cv/core/CameraModel.h" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// Wrapper of CameraExtrinsics compatible with CVCORE -class Frame3D : public gxf::Component { -public: - virtual ~Frame3D() = default; - Frame3D() = default; - - gxf_result_t registerInterface(gxf::Registrar* registrar) override; - gxf_result_t initialize() override; - - ::cvcore::CameraExtrinsics getCameraExtrinsics() const { - return extrinsics_; - } - -private: - gxf::Parameter> rotation_; - gxf::Parameter> translation_; - - ::cvcore::CameraExtrinsics extrinsics_; -}; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageAdapter.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageAdapter.cpp deleted file mode 100644 index 12e7fe7..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageAdapter.cpp +++ /dev/null @@ -1,78 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
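The removed Frame3D::initialize() above validates that the rotation parameter has nine entries and the translation three, then packs them into a 3x4 [R|t] matrix before constructing ::cvcore::CameraExtrinsics. A standalone sketch of that packing follows; std::array and the printed output are stand-ins for the GXF parameters and the cvcore extrinsics type, and the values are illustrative.

// Sketch of the [R|t] packing done by the removed Frame3D::initialize().
#include <array>
#include <cstdio>

int main() {
  const std::array<float, 9> rotation = {1, 0, 0,
                                         0, 1, 0,
                                         0, 0, 1};      // row-major 3x3
  const std::array<float, 3> translation = {0.1f, 0.0f, 0.2f};

  float raw_matrix[3][4];
  // The rotation fills the left 3x3 block, row-major.
  for (int i = 0; i < 9; i++) {
    raw_matrix[i / 3][i % 3] = rotation[i];
  }
  // The translation becomes the fourth column.
  for (int i = 0; i < 3; i++) {
    raw_matrix[i][3] = translation[i];
  }

  for (const auto& row : raw_matrix) {
    std::printf("%5.2f %5.2f %5.2f %5.2f\n", row[0], row[1], row[2], row[3]);
  }
}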
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#include "ImageAdapter.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -gxf_result_t ImageAdapter::registerInterface(gxf::Registrar* registrar) { - gxf::Expected result; - - result &= registrar->parameter(message_type_param_, "message_type"); - result &= registrar->parameter(image_type_param_, "image_type", "image type", "optional image type", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(allocate_pitch_linear_, "allocate_pitch_linear", - "if true, allocate output buffers as padded pitch linear surfaces", "", false); - - return gxf::ToResultCode(result); -} - -gxf_result_t ImageAdapter::initialize() { - if (message_type_param_.get() == "Tensor") { - message_type_ = BufferType::TENSOR; - } else if (message_type_param_.get() == "VideoBuffer") { - message_type_ = BufferType::VIDEO_BUFFER; - } else { - GXF_LOG_ERROR("unknown buffer type."); - return GXF_FAILURE; - } - - const auto& image_type_param = image_type_param_.try_get(); - if (message_type_ == BufferType::TENSOR && !image_type_param) { - GXF_LOG_INFO("image type must be specified for gxf::Tensor."); - return GXF_FAILURE; - } - if (image_type_param) { - const auto image_type = GetImageTypeFromString(image_type_param.value()); - if (!image_type) { - return GXF_FAILURE; - } - image_type_ = image_type.value(); - } - return GXF_SUCCESS; -} - -gxf::Expected ImageAdapter::GetImageInfo(const gxf::Entity& message, const char* name) { - if (message_type_ == BufferType::TENSOR) { - auto tensor = message.get(name); - if (!tensor) { - return gxf::Unexpected{GXF_FAILURE}; - } - return detail::GetTensorInfo(tensor.value(), image_type_); - } else { - auto video_buffer = message.get(name); - if (!video_buffer) { - return gxf::Unexpected{GXF_FAILURE}; - } - return detail::GetVideoBufferInfo(video_buffer.value()); - } -} - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageAdapter.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageAdapter.hpp deleted file mode 100644 index 3b41391..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageAdapter.hpp +++ /dev/null @@ -1,101 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_IMAGE_ADAPTER_HPP -#define NVIDIA_CVCORE_IMAGE_ADAPTER_HPP - -#include "ImageUtils.hpp" -#include "detail/ImageAdapterTensorImpl.hpp" -#include "detail/ImageAdapterVideoBufferImpl.hpp" - -#include "gxf/core/component.hpp" -#include "gxf/multimedia/video.hpp" -#include "gxf/std/allocator.hpp" -#include "gxf/std/parameter_parser_std.hpp" -#include "gxf/std/tensor.hpp" - -#include "cv/core/Image.h" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// Enum class: gxf::Tensor and gxf::VideoBuffer -enum class BufferType { - TENSOR, - VIDEO_BUFFER, -}; - -// Utility component for conversion between message and cvcore image type -class ImageAdapter : public gxf::Component { -public: - virtual ~ImageAdapter() = default; - ImageAdapter() = default; - - gxf_result_t registerInterface(gxf::Registrar* registrar) override; - gxf_result_t initialize() override; - - gxf::Expected GetImageInfo(const gxf::Entity& message, const char* name = nullptr); - - template<::cvcore::ImageType T> - gxf::Expected<::cvcore::Image> WrapImageFromMessage(const gxf::Entity& message, const char* name = nullptr) { - if (message_type_ == BufferType::TENSOR) { - auto tensor = message.get(name); - if (!tensor) { - return gxf::Unexpected{GXF_FAILURE}; - } - return detail::WrapImageFromTensor(tensor.value()); - } else { - auto video_buffer = message.get(name); - if (!video_buffer) { - return gxf::Unexpected{GXF_FAILURE}; - } - return detail::WrapImageFromVideoBuffer(video_buffer.value()); - } - } - - template<::cvcore::ImageType T> - gxf_result_t AddImageToMessage(gxf::Entity& message, size_t width, size_t height, - gxf::Handle allocator, bool is_cpu, const char* name = nullptr) { - if (message_type_ == BufferType::TENSOR) { - auto tensor = message.add(name); - if (!tensor) { - return GXF_FAILURE; - } - return detail::AllocateTensor(tensor.value(), width, height, allocator, is_cpu, allocate_pitch_linear_.get()); - } else { - auto video_buffer = message.add(name); - if (!video_buffer) { - return GXF_FAILURE; - } - return detail::AllocateVideoBuffer(video_buffer.value(), width, height, allocator, is_cpu, allocate_pitch_linear_.get()); - } - } - -private: - gxf::Parameter message_type_param_; - gxf::Parameter image_type_param_; - gxf::Parameter allocate_pitch_linear_; - - ::cvcore::ImageType image_type_; - BufferType message_type_; -}; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageUtils.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageUtils.cpp deleted file mode 100644 index ffb5bcc..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageUtils.cpp +++ /dev/null @@ -1,175 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 -#include "ImageUtils.hpp" - -#include -#include - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// helper function to match input image type string to cvcore::ImageType -gxf::Expected<::cvcore::ImageType> GetImageTypeFromString(const std::string& type) { - if (type == "Y_U8") { - return ::cvcore::ImageType::Y_U8; - } else if (type == "Y_U16") { - return ::cvcore::ImageType::Y_U16; - } else if (type == "Y_F32") { - return ::cvcore::ImageType::Y_F32; - } else if (type == "RGB_U8") { - return ::cvcore::ImageType::RGB_U8; - } else if (type == "RGB_U16") { - return ::cvcore::ImageType::RGB_U16; - } else if (type == "RGB_F32") { - return ::cvcore::ImageType::RGB_F32; - } else if (type == "BGR_U8") { - return ::cvcore::ImageType::BGR_U8; - } else if (type == "BGR_U16") { - return ::cvcore::ImageType::BGR_U16; - } else if (type == "BGR_F32") { - return ::cvcore::ImageType::BGR_F32; - } else if (type == "PLANAR_RGB_U8") { - return ::cvcore::ImageType::PLANAR_RGB_U8; - } else if (type == "PLANAR_RGB_U16") { - return ::cvcore::ImageType::PLANAR_RGB_U16; - } else if (type == "PLANAR_RGB_F32") { - return ::cvcore::ImageType::PLANAR_RGB_F32; - } else if (type == "PLANAR_BGR_U8") { - return ::cvcore::ImageType::PLANAR_BGR_U8; - } else if (type == "PLANAR_BGR_U16") { - return ::cvcore::ImageType::PLANAR_BGR_U16; - } else if (type == "PLANAR_BGR_F32") { - return ::cvcore::ImageType::PLANAR_BGR_F32; - } else if (type == "NV12") { - return ::cvcore::ImageType::NV12; - } else if (type == "NV24") { - return ::cvcore::ImageType::NV24; - } else { - GXF_LOG_ERROR("invalid image type."); - return gxf::Unexpected{GXF_FAILURE}; - } -} - -gxf::Expected<::cvcore::tensor_ops::InterpolationType> GetInterpolationType(const std::string& type) { - if (type == "nearest") { - return ::cvcore::tensor_ops::InterpolationType::INTERP_NEAREST; - } else if (type == "linear") { - return ::cvcore::tensor_ops::InterpolationType::INTERP_LINEAR; - } else if (type == "cubic_bspline") { - return ::cvcore::tensor_ops::InterpolationType::INTERP_CUBIC_BSPLINE; - } else if (type == "cubic_catmullrom") { - return ::cvcore::tensor_ops::InterpolationType::INTERP_CUBIC_CATMULLROM; - } else { - GXF_LOG_ERROR("invalid interpolation type."); - return gxf::Unexpected{GXF_FAILURE}; - } -} - -gxf::Expected<::cvcore::tensor_ops::BorderType> GetBorderType(const std::string& type) { - if (type == "zero") { - return ::cvcore::tensor_ops::BorderType::BORDER_ZERO; - } else if (type == "repeat") { - return ::cvcore::tensor_ops::BorderType::BORDER_REPEAT; - } else if (type == "reverse") { - return ::cvcore::tensor_ops::BorderType::BORDER_REVERSE; - } else if (type == "mirror") { - return ::cvcore::tensor_ops::BorderType::BORDER_MIRROR; - } else { - GXF_LOG_ERROR("invalid border type."); - return gxf::Unexpected{GXF_FAILURE}; - } -} - -gxf::Expected<::cvcore::CameraDistortionType> GetCameraDistortionType(gxf::DistortionType type) { - switch (type) { - case gxf::DistortionType::Perspective: - return ::cvcore::CameraDistortionType::NONE; - case gxf::DistortionType::Polynomial: - return ::cvcore::CameraDistortionType::Polynomial; - case gxf::DistortionType::FisheyeEquidistant: - return ::cvcore::CameraDistortionType::FisheyeEquidistant; - case gxf::DistortionType::FisheyeEquisolid: - return ::cvcore::CameraDistortionType::FisheyeEquisolid; - case gxf::DistortionType::FisheyeOrthoGraphic: - return ::cvcore::CameraDistortionType::FisheyeOrthoGraphic; - case gxf::DistortionType::FisheyeStereographic: 
- return ::cvcore::CameraDistortionType::FisheyeStereographic; - default: - GXF_LOG_ERROR("invalid distortion type."); - return gxf::Unexpected{GXF_FAILURE}; - } -} - -gxf::Expected GetDistortionType(::cvcore::CameraDistortionType type) { - switch (type) { - case ::cvcore::CameraDistortionType::Polynomial: - return gxf::DistortionType::Polynomial; - case ::cvcore::CameraDistortionType::FisheyeEquidistant: - return gxf::DistortionType::FisheyeEquidistant; - case ::cvcore::CameraDistortionType::FisheyeEquisolid: - return gxf::DistortionType::FisheyeEquisolid; - case ::cvcore::CameraDistortionType::FisheyeOrthoGraphic: - return gxf::DistortionType::FisheyeOrthoGraphic; - case ::cvcore::CameraDistortionType::FisheyeStereographic: - return gxf::DistortionType::FisheyeStereographic; - default: - GXF_LOG_ERROR("invalid distortion type."); - return gxf::Unexpected{GXF_FAILURE}; - } -} - -gxf::Expected GetCroppedCameraModel(const gxf::CameraModel& input, const ::cvcore::BBox& roi) { - if (!roi.isValid()) { - return gxf::Unexpected{GXF_FAILURE}; - } - gxf::CameraModel camera; - const size_t output_width = roi.xmax - roi.xmin; - const size_t output_height = roi.ymax - roi.ymin; - camera.dimensions = {static_cast(output_width), static_cast(output_height)}; - camera.focal_length = input.focal_length; - // We will keep the relative principal point location unchanged for cropping; - camera.principal_point = {input.principal_point.x / input.dimensions.x * output_width, - input.principal_point.y / input.dimensions.y * output_height}, - camera.skew_value = input.skew_value; - camera.distortion_type = input.distortion_type; - std::copy(std::begin(input.distortion_coefficients), std::end(input.distortion_coefficients), - std::begin(camera.distortion_coefficients)); - return camera; -} - -gxf::Expected GetScaledCameraModel(const gxf::CameraModel& input, size_t output_width, - size_t output_height, bool keep_aspect_ratio) { - gxf::CameraModel camera; - const float scaler_x = static_cast(output_width) / input.dimensions.x; - const float scaler_y = static_cast(output_height) / input.dimensions.y; - const float min_scaler = std::min(scaler_x, scaler_y); - camera.dimensions = {static_cast(output_width), static_cast(output_height)}; - camera.focal_length = keep_aspect_ratio - ? nvidia::gxf::Vector2f{min_scaler * input.focal_length.x, min_scaler * input.focal_length.y} - : nvidia::gxf::Vector2f{scaler_x * input.focal_length.x, scaler_y * input.focal_length.y}; - camera.principal_point = {scaler_x * input.principal_point.x, scaler_y * input.principal_point.y}, - camera.skew_value = input.skew_value; - camera.distortion_type = input.distortion_type; - std::copy(std::begin(input.distortion_coefficients), std::end(input.distortion_coefficients), - std::begin(camera.distortion_coefficients)); - return camera; -} - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageUtils.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageUtils.hpp deleted file mode 100644 index d052827..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/ImageUtils.hpp +++ /dev/null @@ -1,65 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
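The two helpers above encode how the intrinsics track image-size changes: GetCroppedCameraModel keeps the principal point at the same relative location inside the new dimensions and leaves the focal length untouched, while GetScaledCameraModel scales the principal point per axis and scales the focal length either per axis or, when the aspect ratio is kept, by the smaller of the two scalers. The standalone sketch below repeats the same arithmetic on a plain struct; CameraModel here is a stand-in for gxf::CameraModel and the numbers are illustrative.

// Sketch of the intrinsics updates in the removed crop/scale helpers.
#include <algorithm>
#include <cstdio>

struct CameraModel {
  float width, height;   // image dimensions in pixels
  float fx, fy;          // focal lengths in pixels
  float cx, cy;          // principal point in pixels
};

// Crop: keep the relative principal-point location, leave focal length as-is.
CameraModel Cropped(const CameraModel& in, float out_w, float out_h) {
  return {out_w, out_h, in.fx, in.fy,
          in.cx / in.width * out_w, in.cy / in.height * out_h};
}

// Scale: per-axis scalers for the principal point; the focal length uses the
// per-axis scalers, or the smaller one when the aspect ratio is kept.
CameraModel Scaled(const CameraModel& in, float out_w, float out_h,
                   bool keep_aspect_ratio) {
  const float sx = out_w / in.width;
  const float sy = out_h / in.height;
  const float s = std::min(sx, sy);
  return {out_w, out_h,
          keep_aspect_ratio ? s * in.fx : sx * in.fx,
          keep_aspect_ratio ? s * in.fy : sy * in.fy,
          sx * in.cx, sy * in.cy};
}

int main() {
  const CameraModel cam{1280, 720, 900, 900, 640, 360};
  const CameraModel scaled = Scaled(cam, 960, 576, /*keep_aspect_ratio=*/false);
  std::printf("fx=%.1f fy=%.1f cx=%.1f cy=%.1f\n",
              scaled.fx, scaled.fy, scaled.cx, scaled.cy);
}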
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_IMAGE_UTILS_HPP -#define NVIDIA_CVCORE_IMAGE_UTILS_HPP - -#include "cv/core/BBox.h" -#include "cv/core/CameraModel.h" -#include "cv/core/Image.h" -#include "cv/tensor_ops/ImageUtils.h" -#include "gxf/core/expected.hpp" -#include "gxf/multimedia/camera.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// Description of Image -struct ImageInfo { - ::cvcore::ImageType type; - size_t width; - size_t height; - bool is_cpu; -}; - -// helper function to match input image type string to cvcore::ImageType -gxf::Expected<::cvcore::ImageType> GetImageTypeFromString(const std::string& type); - -// Helper function to get the interpolation type -gxf::Expected<::cvcore::tensor_ops::InterpolationType> GetInterpolationType(const std::string& type); - -// Helper function to get the border type -gxf::Expected<::cvcore::tensor_ops::BorderType> GetBorderType(const std::string& type); - -// Helper function to get the cvcore camera distortion type -gxf::Expected<::cvcore::CameraDistortionType> GetCameraDistortionType(gxf::DistortionType type); - -// Helper function to get the gxf distortion type -gxf::Expected GetDistortionType(::cvcore::CameraDistortionType type); - -// Helper function to get the new camera model after applying crop operation -gxf::Expected GetCroppedCameraModel(const gxf::CameraModel& input, const ::cvcore::BBox& roi); - -// Helper function to get the new camera model after applying scale operation -gxf::Expected GetScaledCameraModel(const gxf::CameraModel& input, size_t output_width, - size_t output_height, bool keep_aspect_ratio); - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/InterleavedToPlanar.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/InterleavedToPlanar.cpp deleted file mode 100644 index 7b63825..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/InterleavedToPlanar.cpp +++ /dev/null @@ -1,146 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 -#include "InterleavedToPlanar.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -namespace detail { - -template<::cvcore::ImageType T_IN, ::cvcore::ImageType T_OUT> -gxf_result_t InterleavedToPlanarImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info, - const ImageInfo& input_info, const char* output_name, const char* input_name, - gxf::Handle output_adapter, gxf::Handle input_adapter, - gxf::Handle allocator, cudaStream_t stream) { - auto input_image = input_adapter->WrapImageFromMessage(input, input_name); - if (!input_image) { - return GXF_FAILURE; - } - - auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator, - output_info.is_cpu, output_name); - if (error != GXF_SUCCESS) { - return GXF_FAILURE; - } - - auto output_image = output_adapter->WrapImageFromMessage(output, output_name); - if (!output_image) { - return GXF_FAILURE; - } - ::cvcore::tensor_ops::InterleavedToPlanar(output_image.value(), input_image.value(), stream); - return GXF_SUCCESS; -} - -} // namespace detail - -gxf_result_t InterleavedToPlanar::registerInterface(gxf::Registrar* registrar) { - gxf::Expected result; - - result &= registrar->parameter(receiver_, "receiver"); - result &= registrar->parameter(transmitter_, "transmitter"); - result &= registrar->parameter(pool_, "pool"); - result &= registrar->parameter(stream_pool_, "stream_pool", "cuda stream pool", "cuda stream pool object", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(input_adapter_, "input_adapter"); - result &= registrar->parameter(output_adapter_, "output_adapter"); - result &= registrar->parameter(input_name_, "input_name", "input name", "input tensor name", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(output_name_, "output_name", "output name", "output tensor name", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - - return gxf::ToResultCode(result); -} - -gxf::Expected InterleavedToPlanar::doInferOutputInfo(gxf::Entity& input) { - // Output type is planar - ::cvcore::ImageType output_type; - switch (input_info_.type) { - case ::cvcore::ImageType::RGB_U8: { - output_type = ::cvcore::ImageType::PLANAR_RGB_U8; - break; - } - case ::cvcore::ImageType::RGB_U16: { - output_type = ::cvcore::ImageType::PLANAR_RGB_U16; - break; - } - case ::cvcore::ImageType::RGB_F32: { - output_type = ::cvcore::ImageType::PLANAR_RGB_F32; - break; - } - case ::cvcore::ImageType::BGR_U8: { - output_type = ::cvcore::ImageType::PLANAR_BGR_U8; - break; - } - case ::cvcore::ImageType::BGR_U16: { - output_type = ::cvcore::ImageType::PLANAR_BGR_U16; - break; - } - case ::cvcore::ImageType::BGR_F32: { - output_type = ::cvcore::ImageType::PLANAR_BGR_F32; - break; - } - case ::cvcore::ImageType::PLANAR_RGB_U8: - case ::cvcore::ImageType::PLANAR_RGB_U16: - case ::cvcore::ImageType::PLANAR_RGB_F32: - case ::cvcore::ImageType::PLANAR_BGR_U8: - case ::cvcore::ImageType::PLANAR_BGR_U16: - case ::cvcore::ImageType::PLANAR_BGR_F32: { - output_type = input_info_.type; - no_op_ = true; - break; - } - default: { - GXF_LOG_ERROR("invalid input type for interleaved to planar conversion."); - return gxf::Unexpected{GXF_FAILURE}; - } - } - return ImageInfo{output_type, input_info_.width, input_info_.height, input_info_.is_cpu}; -} - -gxf_result_t InterleavedToPlanar::doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& 
input) { - *output = *input; - return GXF_SUCCESS; -} - -#define DEFINE_INTERLEAVED_TO_PLANAR(INPUT_TYPE, OUTPUT_TYPE) \ - if (input_info_.type == INPUT_TYPE && output_info_.type == OUTPUT_TYPE) { \ - return detail::InterleavedToPlanarImpl(output, input, output_info_, input_info_, \ - output_name, input_name, output_adapter_.get(), \ - input_adapter_.get(), pool_.get(), stream); \ - } - -gxf_result_t InterleavedToPlanar::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, - const char* output_name, const char* input_name) { - GXF_LOG_INFO("execute interleaved_to_planar conversion"); - // Run the interleaved to planar operation - DEFINE_INTERLEAVED_TO_PLANAR(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::PLANAR_RGB_U8); - DEFINE_INTERLEAVED_TO_PLANAR(::cvcore::ImageType::RGB_U16, ::cvcore::ImageType::PLANAR_RGB_U16); - DEFINE_INTERLEAVED_TO_PLANAR(::cvcore::ImageType::RGB_F32, ::cvcore::ImageType::PLANAR_RGB_F32); - DEFINE_INTERLEAVED_TO_PLANAR(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::PLANAR_BGR_U8); - DEFINE_INTERLEAVED_TO_PLANAR(::cvcore::ImageType::BGR_U16, ::cvcore::ImageType::PLANAR_BGR_U16); - DEFINE_INTERLEAVED_TO_PLANAR(::cvcore::ImageType::BGR_F32, ::cvcore::ImageType::PLANAR_BGR_F32); - - // Return error code for unsupported type - GXF_LOG_ERROR("invalid input/output type for image interleaved to planar conversion."); - return GXF_FAILURE; -} - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/InterleavedToPlanar.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/InterleavedToPlanar.hpp deleted file mode 100644 index 2e1efbe..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/InterleavedToPlanar.hpp +++ /dev/null @@ -1,45 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_INTERLEAVED_TO_PLANAR_HPP -#define NVIDIA_CVCORE_INTERLEAVED_TO_PLANAR_HPP - -#include "TensorOperator.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// InterleavedToPlanar operator. 
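The removed InterleavedToPlanar operator is purely a memory-layout change: an interleaved HWC image (R, G, B per pixel) becomes planar CHW (one contiguous plane per channel) with the same values and dimensions, which is why RGB_U8 maps to PLANAR_RGB_U8 and an already-planar input is treated as a no-op. The operator does this on the GPU via ::cvcore::tensor_ops::InterleavedToPlanar; the CPU sketch below only illustrates the index math, with made-up dimensions.

// CPU sketch of the HWC -> CHW relayout performed by the removed operator.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const int W = 4, H = 2, C = 3;                 // tiny RGB image
  std::vector<uint8_t> interleaved(W * H * C);   // HWC: RGBRGBRGB...
  for (size_t i = 0; i < interleaved.size(); i++) {
    interleaved[i] = static_cast<uint8_t>(i);
  }

  std::vector<uint8_t> planar(W * H * C);        // CHW: RRR...GGG...BBB
  for (int y = 0; y < H; y++) {
    for (int x = 0; x < W; x++) {
      for (int c = 0; c < C; c++) {
        planar[c * H * W + y * W + x] = interleaved[(y * W + x) * C + c];
      }
    }
  }

  // First pixel of each plane: 0 (R), 1 (G), 2 (B).
  std::printf("%d %d %d\n", planar[0], planar[H * W], planar[2 * H * W]);
}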
-class InterleavedToPlanar : public TensorOperator { -public: - virtual ~InterleavedToPlanar() {} - - gxf_result_t registerInterface(gxf::Registrar* registrar) override; - -private: - gxf::Expected doInferOutputInfo(gxf::Entity& input) override final; - gxf_result_t doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) override final; - gxf_result_t doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, const char* output_name, - const char* input_name) override final; -}; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Normalize.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Normalize.cpp deleted file mode 100644 index 2429abb..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Normalize.cpp +++ /dev/null @@ -1,183 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#include "Normalize.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -namespace detail { - -template<::cvcore::ImageType T_IN, ::cvcore::ImageType T_OUT> -gxf_result_t NormalizeC1Impl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info, - const ImageInfo& input_info, const char* output_name, const char* input_name, - gxf::Handle output_adapter, gxf::Handle input_adapter, - gxf::Handle allocator, const std::vector& scales, - const std::vector& offsets, cudaStream_t stream) { - if (scales.size() != 1 || offsets.size() != 1) { - GXF_LOG_ERROR("invalid scales/offsets dimension"); - return GXF_FAILURE; - } - - auto input_image = input_adapter->WrapImageFromMessage(input, input_name); - if (!input_image) { - return GXF_FAILURE; - } - - auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator, - output_info.is_cpu, output_name); - if (error != GXF_SUCCESS) { - return GXF_FAILURE; - } - - auto output_image = output_adapter->WrapImageFromMessage(output, output_name); - if (!output_image) { - return GXF_FAILURE; - } - ::cvcore::tensor_ops::Normalize(output_image.value(), input_image.value(), scales[0], offsets[0], stream); - return GXF_SUCCESS; -} - -template<::cvcore::ImageType T_IN, ::cvcore::ImageType T_OUT> -gxf_result_t NormalizeC3Impl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info, - const ImageInfo& input_info, const char* output_name, const char* input_name, - gxf::Handle output_adapter, gxf::Handle input_adapter, - gxf::Handle allocator, const std::vector& scales, - const std::vector& offsets, cudaStream_t stream) { - if (scales.size() != 3 || offsets.size() != 3) { - GXF_LOG_ERROR("invalid scales/offsets dimension"); - return GXF_FAILURE; - } - - auto input_image = input_adapter->WrapImageFromMessage(input, input_name); - if (!input_image) { - return GXF_FAILURE; - } - - auto error = 
output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator, - output_info.is_cpu, output_name); - if (error != GXF_SUCCESS) { - return GXF_FAILURE; - } - - auto output_image = output_adapter->WrapImageFromMessage(output, output_name); - if (!output_image) { - return GXF_FAILURE; - } - const float scales_value[3] = {scales[0], scales[1], scales[2]}; - const float offsets_value[3] = {offsets[0], offsets[1], offsets[2]}; - ::cvcore::tensor_ops::Normalize(output_image.value(), input_image.value(), scales_value, offsets_value, stream); - return GXF_SUCCESS; -} - -} // namespace detail - -gxf_result_t Normalize::registerInterface(gxf::Registrar* registrar) { - gxf::Expected result; - - result &= registrar->parameter(scales_, "scales"); - result &= registrar->parameter(offsets_, "offsets"); - result &= registrar->parameter(receiver_, "receiver"); - result &= registrar->parameter(transmitter_, "transmitter"); - result &= registrar->parameter(pool_, "pool"); - result &= registrar->parameter(stream_pool_, "stream_pool", "cuda stream pool", "cuda stream pool object", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(input_adapter_, "input_adapter"); - result &= registrar->parameter(output_adapter_, "output_adapter"); - result &= registrar->parameter(input_name_, "input_name", "input name", "input tensor name", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(output_name_, "output_name", "output name", "output tensor name", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - - return gxf::ToResultCode(result); -} - -gxf::Expected Normalize::doInferOutputInfo(gxf::Entity& input) { - // Output type is F32 - ::cvcore::ImageType output_type; - switch (input_info_.type) { - case ::cvcore::ImageType::Y_U8: - case ::cvcore::ImageType::Y_U16: - case ::cvcore::ImageType::Y_F32: { - output_type = ::cvcore::ImageType::Y_F32; - break; - } - case ::cvcore::ImageType::RGB_U8: - case ::cvcore::ImageType::RGB_U16: - case ::cvcore::ImageType::RGB_F32: { - output_type = ::cvcore::ImageType::RGB_F32; - break; - } - case ::cvcore::ImageType::BGR_U8: - case ::cvcore::ImageType::BGR_U16: - case ::cvcore::ImageType::BGR_F32: { - output_type = ::cvcore::ImageType::BGR_F32; - break; - } - default: { - GXF_LOG_ERROR("invalid input type for normalize."); - return gxf::Unexpected{GXF_FAILURE}; - } - } - // Operation must be performed under any condition - no_op_ = false; - return ImageInfo{output_type, input_info_.width, input_info_.height, input_info_.is_cpu}; -} - -gxf_result_t Normalize::doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) { - *output = *input; - return GXF_SUCCESS; -} - -#define DEFINE_NORMALIZE_C1(INPUT_TYPE, OUTPUT_TYPE) \ - if (input_info_.type == INPUT_TYPE && output_info_.type == OUTPUT_TYPE) { \ - return detail::NormalizeC1Impl(output, input, output_info_, input_info_, output_name, \ - input_name, output_adapter_.get(), input_adapter_.get(), \ - pool_.get(), scales_.get(), offsets_.get(), stream); \ - } - -#define DEFINE_NORMALIZE_C3(INPUT_TYPE, OUTPUT_TYPE) \ - if (input_info_.type == INPUT_TYPE && output_info_.type == OUTPUT_TYPE) { \ - return detail::NormalizeC3Impl(output, input, output_info_, input_info_, output_name, \ - input_name, output_adapter_.get(), input_adapter_.get(), \ - pool_.get(), scales_.get(), offsets_.get(), stream); \ - } - -gxf_result_t Normalize::doExecute(gxf::Entity& output, gxf::Entity& input, 
cudaStream_t stream, const char* output_name, - const char* input_name) { - GXF_LOG_INFO("execute normalize"); - - // Run the image normalization operation - DEFINE_NORMALIZE_C1(::cvcore::ImageType::Y_U8, ::cvcore::ImageType::Y_F32); - DEFINE_NORMALIZE_C1(::cvcore::ImageType::Y_U16, ::cvcore::ImageType::Y_F32); - DEFINE_NORMALIZE_C1(::cvcore::ImageType::Y_F32, ::cvcore::ImageType::Y_F32); - DEFINE_NORMALIZE_C3(::cvcore::ImageType::RGB_U8, ::cvcore::ImageType::RGB_F32); - DEFINE_NORMALIZE_C3(::cvcore::ImageType::RGB_U16, ::cvcore::ImageType::RGB_F32); - DEFINE_NORMALIZE_C3(::cvcore::ImageType::RGB_F32, ::cvcore::ImageType::RGB_F32); - DEFINE_NORMALIZE_C3(::cvcore::ImageType::BGR_U8, ::cvcore::ImageType::BGR_F32); - DEFINE_NORMALIZE_C3(::cvcore::ImageType::BGR_U16, ::cvcore::ImageType::BGR_F32); - DEFINE_NORMALIZE_C3(::cvcore::ImageType::BGR_F32, ::cvcore::ImageType::BGR_F32); - - // Return error code for unsupported type - GXF_LOG_ERROR("invalid input/output type for image normalize."); - return GXF_FAILURE; -} - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Normalize.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Normalize.hpp deleted file mode 100644 index efb735a..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Normalize.hpp +++ /dev/null @@ -1,47 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_NORMALIZE_HPP -#define NVIDIA_CVCORE_NORMALIZE_HPP - -#include "TensorOperator.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// Normalization operator. -class Normalize : public TensorOperator { -public: - virtual ~Normalize() {} - gxf_result_t registerInterface(gxf::Registrar* registrar) override; - -private: - gxf::Expected doInferOutputInfo(gxf::Entity& input) override final; - gxf_result_t doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) override final; - gxf_result_t doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, const char* output_name, - const char* input_name) override final; - - gxf::Parameter> scales_; - gxf::Parameter> offsets_; -}; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Reshape.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Reshape.cpp deleted file mode 100644 index 322b843..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Reshape.cpp +++ /dev/null @@ -1,98 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
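The removed Normalize operator always emits an F32 image and applies a per-channel scale and offset, with one pair for grayscale inputs (the C1 path) and three for RGB/BGR (the C3 path); no_op_ is forced to false because the math runs even when the input is already F32. The arithmetic itself lives in ::cvcore::tensor_ops::Normalize; the CPU sketch below assumes an out = in * scale + offset convention, and both that assumption and the scale/offset values are illustrative only.

// CPU sketch of a per-channel normalize to float32 (convention assumed).
#include <array>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const int C = 3;
  const std::vector<uint8_t> rgb = {0, 128, 255, 64, 64, 64};  // HWC pixels
  const std::array<float, 3> scales = {1.f / 255.f, 1.f / 255.f, 1.f / 255.f};
  const std::array<float, 3> offsets = {0.f, 0.f, 0.f};

  std::vector<float> out(rgb.size());
  for (size_t i = 0; i < rgb.size(); i++) {
    const size_t c = i % C;                    // channel index in HWC layout
    out[i] = rgb[i] * scales[c] + offsets[c];  // per-channel affine map
  }

  std::printf("%.3f %.3f %.3f\n", out[0], out[1], out[2]);
}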
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#include "Reshape.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -gxf_result_t Reshape::registerInterface(gxf::Registrar* registrar) { - gxf::Expected result; - - result &= registrar->parameter(output_shape_, "output_shape"); - result &= registrar->parameter(receiver_, "receiver"); - result &= registrar->parameter(transmitter_, "transmitter"); - result &= registrar->parameter(pool_, "pool"); - result &= registrar->parameter(input_adapter_, "input_adapter"); - result &= registrar->parameter(output_adapter_, "output_adapter"); - result &= registrar->parameter(input_name_, "input_name", "input name", "input tensor name", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(output_name_, "output_name", "output name", "output tensor name", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - - return gxf::ToResultCode(result); -} - -gxf_result_t Reshape::doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) { - *output = *input; - return GXF_SUCCESS; -} - -gxf_result_t Reshape::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, const char* output_name, - const char* input_name) { - GXF_LOG_INFO("execute reshape."); - - auto input_tensor = input.get(input_name); - if (!input_tensor) { - GXF_LOG_ERROR("input message does not contain Tensor"); - return input_tensor.error(); - } - - auto output_tensor = output.add(output_name); - if (!output_tensor) { - GXF_LOG_ERROR("unable to add output Tensor"); - return output_tensor.error(); - } - - const auto& input_shape = input_tensor.value()->shape(); - const std::vector& output_shape_arr = output_shape_; - std::array dims = {}; - std::copy(output_shape_arr.begin(), output_shape_arr.end(), dims.begin()); - const auto output_shape = gxf::Shape(dims, output_shape_arr.size()); - - if (output_shape.size() != input_shape.size()) { - GXF_LOG_ERROR("reshape size mismatch."); - return GXF_FAILURE; - } - - auto result = output_tensor.value()->reshapeCustom( - output_shape, input_tensor.value()->element_type(), gxf::PrimitiveTypeSize(input_tensor.value()->element_type()), - gxf::Unexpected{GXF_UNINITIALIZED_VALUE}, input_tensor.value()->storage_type(), pool_.get()); - - if (!result) { - GXF_LOG_ERROR("reshape tensor failed."); - return result.error(); - } - - // Simply copy the memory - if (input_tensor.value()->storage_type() == gxf::MemoryStorageType::kDevice) { - cudaError_t error = cudaMemcpyAsync(output_tensor.value()->pointer(), input_tensor.value()->pointer(), - input_tensor.value()->size(), cudaMemcpyDeviceToDevice, stream); - if (error != cudaSuccess) { - GXF_LOG_ERROR("cudaMemcpyAsync returned error code"); - return GXF_FAILURE; - } - } else { - memcpy(output_tensor.value()->pointer(), input_tensor.value()->pointer(), input_tensor.value()->size()); - } - return GXF_SUCCESS; -} - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Reshape.hpp 
b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Reshape.hpp deleted file mode 100644 index c6f1c6f..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Reshape.hpp +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_RESHAPE_HPP -#define NVIDIA_CVCORE_RESHAPE_HPP - -#include "TensorOperator.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// Reshaping operator (only valid for gxf::Tensor). -class Reshape : public TensorOperator { -public: - virtual ~Reshape() {} - gxf_result_t registerInterface(gxf::Registrar* registrar) override; - -private: - gxf::Expected doInferOutputInfo(gxf::Entity& input) override final { - no_op_ = false; - return ImageInfo{}; - }; - gxf_result_t doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) override final; - gxf_result_t doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, const char* output_name, - const char* input_name) override final; - - gxf::Parameter> output_shape_; -}; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Resize.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Resize.cpp deleted file mode 100644 index 943ac5a..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Resize.cpp +++ /dev/null @@ -1,194 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
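The removed Reshape operator only relabels the buffer: it requires that the requested shape describe the same total number of elements as the input (gxf::Shape::size() is the element count, not the rank), reshapes the output tensor's metadata via reshapeCustom, and then copies the bytes verbatim, using cudaMemcpyAsync for device tensors and memcpy for host tensors. A standalone host-side sketch of that contract, with illustrative shapes:

// Sketch of the reshape contract: same element count, byte-for-byte copy.
#include <cstdio>
#include <cstring>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  const std::vector<int> input_shape = {2, 3, 4};
  const std::vector<int> output_shape = {4, 6};

  const auto count = [](const std::vector<int>& s) {
    return std::accumulate(s.begin(), s.end(), 1, std::multiplies<int>());
  };
  if (count(input_shape) != count(output_shape)) {
    std::printf("reshape size mismatch.\n");
    return 1;
  }

  std::vector<float> input(count(input_shape), 0.5f);
  std::vector<float> output(count(output_shape));
  // Host-side equivalent of the device cudaMemcpyAsync in the removed code.
  std::memcpy(output.data(), input.data(), input.size() * sizeof(float));

  std::printf("reshaped %zu elements without moving data\n", output.size());
}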
-// -// SPDX-License-Identifier: Apache-2.0 -#include "Resize.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -namespace detail { - -template<::cvcore::ImageType T> -gxf_result_t ResizeImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info, - const ImageInfo& input_info, const char* output_name, const char* input_name, - gxf::Handle output_adapter, gxf::Handle input_adapter, - gxf::Handle allocator, bool keep_aspect_ratio, - ::cvcore::tensor_ops::InterpolationType interp_type, cudaStream_t stream) { - auto input_image = input_adapter->WrapImageFromMessage(input, input_name); - if (!input_image) { - return GXF_FAILURE; - } - - auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator, - output_info.is_cpu, output_name); - if (error != GXF_SUCCESS) { - return GXF_FAILURE; - } - - auto output_image = output_adapter->WrapImageFromMessage(output, output_name); - if (!output_image) { - return GXF_FAILURE; - } - ::cvcore::tensor_ops::Resize(output_image.value(), input_image.value(), keep_aspect_ratio, interp_type, stream); - return GXF_SUCCESS; -} - -template<::cvcore::ImageType T> -gxf_result_t ResizeStreamImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info, - const ImageInfo& input_info, const char* output_name, const char* input_name, - gxf::Handle stream, gxf::Handle output_adapter, - gxf::Handle input_adapter, gxf::Handle allocator, - ::cvcore::tensor_ops::InterpolationType interp_type, - ::cvcore::tensor_ops::BorderType border_type) { - auto input_image = input_adapter->WrapImageFromMessage(input, input_name); - if (!input_image) { - return GXF_FAILURE; - } - - auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator, - output_info.is_cpu, output_name); - if (error != GXF_SUCCESS) { - return GXF_FAILURE; - } - - auto output_image = output_adapter->WrapImageFromMessage(output, output_name); - if (!output_image) { - return GXF_FAILURE; - } - - auto err_code = stream->getStream()->Resize(output_image.value(), input_image.value(), interp_type, border_type); - if (err_code != ::cvcore::make_error_condition(::cvcore::ErrorCode::SUCCESS)) { - GXF_LOG_ERROR("resize operation failed."); - return GXF_FAILURE; - } - - return GXF_SUCCESS; -} - -} // namespace detail - -template -gxf_result_t ResizeBase::registerInterface(gxf::Registrar* registrar) { - gxf::Expected result; - - result &= registrar->parameter(output_width_, "output_width"); - result &= registrar->parameter(output_height_, "output_height"); - result &= registrar->parameter(interp_type_, "interp_type"); - result &= registrar->parameter(border_type_, "border_type"); - result &= registrar->parameter(keep_aspect_ratio_, "keep_aspect_ratio"); - result &= registrar->parameter(receiver_, "receiver"); - result &= registrar->parameter(transmitter_, "transmitter"); - result &= registrar->parameter(pool_, "pool"); - result &= registrar->parameter(stream_, "stream", "tensor stream", "tensor stream object", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(stream_pool_, "stream_pool", "cuda stream pool", "cuda stream pool object", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(input_adapter_, "input_adapter"); - result &= registrar->parameter(output_adapter_, "output_adapter"); - result &= registrar->parameter(input_name_, "input_name", "input name", "input tensor name", - 
gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(output_name_, "output_name", "output name", "output tensor name", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - - return gxf::ToResultCode(result); -} - -template -gxf::Expected ResizeBase::doInferOutputInfo(gxf::Entity& input) { - // Check if no-op is needed - no_op_ = output_width_.get() == input_info_.width && output_height_.get() == input_info_.height; - return ImageInfo{input_info_.type, output_width_.get(), output_height_.get(), input_info_.is_cpu}; -} - -template -gxf_result_t ResizeBase::doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) { - *output = GetScaledCameraModel(*input, output_info_.width, output_info_.height, keep_aspect_ratio_.get()).value(); - return GXF_SUCCESS; -} - -#define DEFINE_RESIZE(INPUT_TYPE) \ - if (input_info_.type == INPUT_TYPE) { \ - return detail::ResizeImpl(output, input, output_info_, input_info_, output_name, input_name, \ - output_adapter_.get(), input_adapter_.get(), pool_.get(), \ - keep_aspect_ratio_.get(), interp.value(), stream); \ - } - -#define DEFINE_STREAM_RESIZE(INPUT_TYPE) \ - if (input_info_.type == INPUT_TYPE) { \ - return detail::ResizeStreamImpl(output, input, output_info_, input_info_, output_name, input_name, \ - stream_.try_get().value(), output_adapter_.get(), \ - input_adapter_.get(), pool_.get(), interp.value(), border.value()); \ - } - -template<> -gxf_result_t ResizeBase::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, - const char* output_name, const char* input_name) { - GXF_LOG_INFO("execute resize."); - // Check if interpolation type is valid - auto interp = GetInterpolationType(interp_type_); - if (!interp) { - return interp.error(); - } - auto border = GetBorderType(border_type_); - if (!border) { - return border.error(); - } - - // Run the image resizing operation - DEFINE_STREAM_RESIZE(::cvcore::ImageType::RGB_U8); - DEFINE_STREAM_RESIZE(::cvcore::ImageType::BGR_U8); - DEFINE_STREAM_RESIZE(::cvcore::ImageType::NV12); - DEFINE_STREAM_RESIZE(::cvcore::ImageType::NV24); - - // Return error code for unsupported type - GXF_LOG_ERROR("invalid input/output type for image resize."); - return GXF_FAILURE; -} - -template<> -gxf_result_t ResizeBase::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, - const char* output_name, const char* input_name) { - GXF_LOG_INFO("execute resize."); - // Check if interpolation type is valid - auto interp = GetInterpolationType(interp_type_); - if (!interp) { - return interp.error(); - } - - // Run the image resizing operation - DEFINE_RESIZE(::cvcore::ImageType::Y_U8); - DEFINE_RESIZE(::cvcore::ImageType::Y_U16); - DEFINE_RESIZE(::cvcore::ImageType::Y_F32); - DEFINE_RESIZE(::cvcore::ImageType::RGB_U8); - DEFINE_RESIZE(::cvcore::ImageType::RGB_U16); - DEFINE_RESIZE(::cvcore::ImageType::RGB_F32); - DEFINE_RESIZE(::cvcore::ImageType::BGR_U8); - DEFINE_RESIZE(::cvcore::ImageType::BGR_U16); - DEFINE_RESIZE(::cvcore::ImageType::BGR_F32); - - // Return error code for unsupported type - GXF_LOG_ERROR("invalid input/output type for image resize."); - return GXF_FAILURE; -} - -template class ResizeBase; -template class ResizeBase; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Resize.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Resize.hpp deleted file mode 100644 index 6771e6e..0000000 --- 
a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Resize.hpp +++ /dev/null @@ -1,55 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_RESIZE_HPP -#define NVIDIA_CVCORE_RESIZE_HPP - -#include "TensorOperator.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// Resizing operator. -template -class ResizeBase : public TensorOperator { -public: - virtual ~ResizeBase() {} - - gxf_result_t registerInterface(gxf::Registrar* registrar) override; - -private: - gxf::Expected doInferOutputInfo(gxf::Entity& input) override final; - gxf_result_t doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) override final; - gxf_result_t doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, const char* output_name, - const char* input_name) override final; - - gxf::Parameter output_width_; - gxf::Parameter output_height_; - gxf::Parameter interp_type_; - gxf::Parameter border_type_; - gxf::Parameter keep_aspect_ratio_; -}; - -class Resize : public ResizeBase {}; -class StreamResize : public ResizeBase {}; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorOperator.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorOperator.cpp deleted file mode 100644 index 294fb06..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorOperator.cpp +++ /dev/null @@ -1,235 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 -#include "TensorOperator.hpp" - -#include "gxf/std/timestamp.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -namespace detail { - -// Function to bind a cuda stream with cid into downstream message -gxf_result_t BindCudaStream(gxf::Entity& message, gxf_uid_t cid) { - if (cid == kNullUid) { - GXF_LOG_ERROR("stream_cid is null"); - return GXF_FAILURE; - } - auto output_stream_id = message.add("stream"); - if (!output_stream_id) { - GXF_LOG_ERROR("failed to add cudastreamid."); - return GXF_FAILURE; - } - output_stream_id.value()->stream_cid = cid; - return GXF_SUCCESS; -} - -// Function to record a new cuda event -gxf_result_t RecordCudaEvent(gxf::Entity& message, gxf::Handle& stream) { - // Create a new event - cudaEvent_t cuda_event; - cudaEventCreateWithFlags(&cuda_event, 0); - gxf::CudaEvent event; - auto ret = event.initWithEvent(cuda_event, stream->dev_id(), [](auto) {}); - if (!ret) { - GXF_LOG_ERROR("failed to init cuda event"); - return GXF_FAILURE; - } - // Record the event - // Can define []() { GXF_LOG_DEBUG("tensorops event synced"); } as callback func for debug purpose - ret = stream->record(event.event().value(), - [event = cuda_event, entity = message.clone().value()](auto) { cudaEventDestroy(event); }); - if (!ret) { - GXF_LOG_ERROR("record event failed"); - return ret.error(); - } - return GXF_SUCCESS; -} - -template -gxf_result_t RerouteMessage(gxf::Entity& output, gxf::Entity& input, - std::function, gxf::Handle)> func, - const char* name = nullptr) { - auto maybe_component = input.get(); - if (maybe_component) { - auto output_component = output.add(name != nullptr ? name : maybe_component.value().name()); - if (!output_component) { - GXF_LOG_ERROR("add output component failed."); - return output_component.error(); - } - return func(output_component.value(), maybe_component.value()); - } - return GXF_SUCCESS; -} - -} // namespace detail - -gxf_result_t TensorOperator::inferOutputInfo(gxf::Entity& input) { - const char* input_name = input_name_.try_get() ? input_name_.try_get().value().c_str() : nullptr; - auto input_info = input_adapter_.get()->GetImageInfo(input, input_name); - if (!input_info) { - return input_info.error(); - } - input_info_ = input_info.value(); - auto output_info = doInferOutputInfo(input); - if (!output_info) { - return output_info.error(); - } - output_info_ = output_info.value(); - return GXF_SUCCESS; -} - -gxf_result_t TensorOperator::updateCameraMessage(gxf::Handle& output, - gxf::Handle& input) { - return doUpdateCameraMessage(output, input); -} - -gxf_result_t TensorOperator::execute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream) { - const char* output_name = output_name_.try_get() ? output_name_.try_get().value().c_str() : nullptr; - const char* input_name = input_name_.try_get() ? 
input_name_.try_get().value().c_str() : nullptr; - return doExecute(output, input, stream, output_name, input_name); -} - -gxf_result_t TensorOperator::start() { - // Allocate cuda stream using stream pool if necessary - if (stream_pool_.try_get()) { - auto stream = stream_pool_.try_get().value()->allocateStream(); - if (!stream) { - GXF_LOG_ERROR("allocating stream failed."); - return GXF_FAILURE; - } - cuda_stream_ptr_ = std::move(stream.value()); - if (!cuda_stream_ptr_->stream()) { - GXF_LOG_ERROR("allocated stream is not initialized."); - return GXF_FAILURE; - } - } - return GXF_SUCCESS; -} - -gxf_result_t TensorOperator::tick() { - // Receiving the data - auto input_message = receiver_->receive(); - // Check received message for errors - if (!input_message) { - return input_message.error(); - } - // Infer output ImageInfo and if it's no-op - auto error = inferOutputInfo(input_message.value()); - if (error != GXF_SUCCESS) { - return error; - } - // Re-direct the input message if no-op is needed - if (no_op_) { - transmitter_->publish(input_message.value()); - return GXF_SUCCESS; - } - // Create output message - gxf::Expected output_message = gxf::Entity::New(context()); - if (!output_message) { - return output_message.error(); - } - // Pass through timestamp if presented in input message - error = - detail::RerouteMessage(output_message.value(), input_message.value(), - [](gxf::Handle output, gxf::Handle input) { - *output = *input; - return GXF_SUCCESS; - }); - if (error != GXF_SUCCESS) { - return error; - } - // Pass through cudaStreamId or create a new cuda stream for NPP backend only - cudaStream_t cuda_stream = 0; // default stream - if (!stream_.try_get()) { - // Allocate new CudaStream if StreamPool attached - if (stream_pool_.try_get()) { - cuda_stream = cuda_stream_ptr_->stream().value(); - if (detail::BindCudaStream(output_message.value(), cuda_stream_ptr_.cid()) != GXF_SUCCESS) { - return GXF_FAILURE; - } - } - auto input_stream_id = input_message.value().get(); - if (input_stream_id) { - auto stream = - gxf::Handle::Create(input_stream_id.value().context(), input_stream_id.value()->stream_cid); - if (!stream) { - GXF_LOG_ERROR("create cudastream from cid failed."); - return GXF_FAILURE; - } - if (stream_pool_.try_get()) { - // sync upstreaming input cuda stream - if (!stream.value()->syncStream()) { - GXF_LOG_ERROR("sync stream failed."); - return GXF_FAILURE; - } - } else { - cuda_stream = stream.value()->stream().value(); - if (detail::BindCudaStream(output_message.value(), stream.value().cid()) != GXF_SUCCESS) { - return GXF_FAILURE; - } - cuda_stream_ptr_ = stream.value(); - } - } - } - // Execute the operation - error = execute(output_message.value(), input_message.value(), cuda_stream); - if (error != GXF_SUCCESS) { - GXF_LOG_ERROR("operation failed."); - return GXF_FAILURE; - } - // Record the cuda event if necessary - if (!cuda_stream_ptr_.is_null()) { - // record on both input/output stream - if (detail::RecordCudaEvent(input_message.value(), cuda_stream_ptr_) != GXF_SUCCESS) { - return GXF_FAILURE; - } - if (detail::RecordCudaEvent(output_message.value(), cuda_stream_ptr_) != GXF_SUCCESS) { - return GXF_FAILURE; - } - } - // Update output camera message if necessary - error = detail::RerouteMessage( - output_message.value(), input_message.value(), - [this](gxf::Handle output, gxf::Handle input) { - return updateCameraMessage(output, input); - }, - "camera"); - if (error != GXF_SUCCESS) { - return error; - } - // Pass through pose3d message if necessary - error = 
detail::RerouteMessage( - output_message.value(), input_message.value(), - [](gxf::Handle output, gxf::Handle input) { - *output = *input; - return GXF_SUCCESS; - }, - "pose"); - if (error != GXF_SUCCESS) { - return error; - } - // Send the processed data - transmitter_->publish(output_message.value()); - - return GXF_SUCCESS; -} - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorOperator.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorOperator.hpp deleted file mode 100644 index 4c65d47..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorOperator.hpp +++ /dev/null @@ -1,95 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_TENSOR_OPERATOR_HPP -#define NVIDIA_CVCORE_TENSOR_OPERATOR_HPP - -#include "ImageAdapter.hpp" -#include "ImageUtils.hpp" -#include "TensorStream.hpp" - -#include "gxf/cuda/cuda_stream.hpp" -#include "gxf/cuda/cuda_stream_id.hpp" -#include "gxf/cuda/cuda_stream_pool.hpp" -#include "gxf/std/allocator.hpp" -#include "gxf/std/codelet.hpp" -#include "gxf/std/parameter_parser_std.hpp" -#include "gxf/std/receiver.hpp" -#include "gxf/std/tensor.hpp" -#include "gxf/std/transmitter.hpp" - -#include "cv/core/Image.h" -#include "cv/core/Tensor.h" -#include "cv/tensor_ops/ImageUtils.h" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// Base class for all tensor_ops operators -class TensorOperator : public gxf::Codelet { -public: - virtual ~TensorOperator() = default; - - gxf_result_t inferOutputInfo(gxf::Entity& input); - - gxf_result_t updateCameraMessage(gxf::Handle& output, gxf::Handle& input); - - gxf_result_t execute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream); - - gxf_result_t start() override; - - gxf_result_t tick() override; - - virtual gxf_result_t stop() override { - return GXF_SUCCESS; - } - -protected: - gxf::Parameter> receiver_; - gxf::Parameter> transmitter_; - gxf::Parameter> pool_; - gxf::Parameter> stream_; - gxf::Parameter> stream_pool_; - gxf::Parameter> input_adapter_; - gxf::Parameter> output_adapter_; - gxf::Parameter input_name_; - gxf::Parameter output_name_; - - // Input image info - ImageInfo input_info_; - // Output image info - ImageInfo output_info_; - // Whether to skip the operation(by default is false) - bool no_op_ = false; - -private: - virtual gxf::Expected doInferOutputInfo(gxf::Entity& input) = 0; - - virtual gxf_result_t doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) = 0; - - virtual gxf_result_t doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, const char* output_name, - const char* input_name) = 0; - - gxf::Handle cuda_stream_ptr_ = nullptr; -}; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git 
a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorStream.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorStream.cpp deleted file mode 100644 index 2f825cf..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorStream.cpp +++ /dev/null @@ -1,124 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include "TensorStream.hpp" - -#include "cv/core/ComputeEngine.h" -#include "cv/tensor_ops/TensorOperators.h" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -namespace detail { - -gxf::Expected<::cvcore::tensor_ops::TensorBackend> GetContextType(const std::string& type) { - if (type == "NPP") { - return ::cvcore::tensor_ops::TensorBackend::NPP; - } else if (type == "VPI") { - return ::cvcore::tensor_ops::TensorBackend::VPI; - } else if (type == "DALI") { - return ::cvcore::tensor_ops::TensorBackend::DALI; - } else { - return gxf::Unexpected{GXF_FAILURE}; - } -} - -gxf::Expected<::cvcore::ComputeEngine> GetComputeEngineType(const std::string& type) { - if (type == "UNKNOWN") { - return ::cvcore::ComputeEngine::UNKNOWN; - } else if (type == "CPU") { - return ::cvcore::ComputeEngine::CPU; - } else if (type == "PVA") { - return ::cvcore::ComputeEngine::PVA; - } else if (type == "VIC") { - return ::cvcore::ComputeEngine::VIC; - } else if (type == "NVENC") { - return ::cvcore::ComputeEngine::NVENC; - } else if (type == "GPU") { - return ::cvcore::ComputeEngine::GPU; - } else if (type == "DLA") { - return ::cvcore::ComputeEngine::DLA; - } else if (type == "COMPUTE_FAULT") { - return ::cvcore::ComputeEngine::COMPUTE_FAULT; - } else { - return gxf::Unexpected{GXF_FAILURE}; - } -} - -} // namespace detail - -gxf_result_t TensorStream::registerInterface(gxf::Registrar* registrar) { - gxf::Expected result; - - result &= registrar->parameter(backend_type_, "backend_type"); - result &= registrar->parameter(engine_type_, "engine_type"); - - return gxf::ToResultCode(result); -} - -gxf_result_t TensorStream::initialize() { - // Construct context - auto backend_type = detail::GetContextType(backend_type_.get()); - if (!backend_type) { - GXF_LOG_ERROR("unknown backend type."); - return GXF_FAILURE; - } - if (!::cvcore::tensor_ops::TensorContextFactory::IsBackendSupported(backend_type.value())) { - GXF_LOG_ERROR("unsupported context type."); - return GXF_FAILURE; - } - auto err_code = ::cvcore::tensor_ops::TensorContextFactory::CreateContext(context_, backend_type.value()); - if (err_code != ::cvcore::make_error_code(::cvcore::ErrorCode::SUCCESS)) { - GXF_LOG_ERROR("tensor context creation failed."); - return GXF_FAILURE; - } - // Construct stream - auto engine_type = detail::GetComputeEngineType(engine_type_.get()); - if (!engine_type) { - return GXF_FAILURE; - } - - if (!context_->IsComputeEngineCompatible(engine_type.value())) { - GXF_LOG_ERROR("invalid compute engine type."); 
- return GXF_FAILURE; - } - err_code = context_->CreateStream(stream_, engine_type.value()); - if (err_code != ::cvcore::make_error_code(::cvcore::ErrorCode::SUCCESS)) { - GXF_LOG_ERROR("tensor stream creation failed."); - return GXF_FAILURE; - } - return GXF_SUCCESS; -} - -gxf_result_t TensorStream::deinitialize() { - auto err_code = context_->DestroyStream(stream_); - if (err_code != ::cvcore::make_error_code(::cvcore::ErrorCode::SUCCESS)) { - GXF_LOG_ERROR("tensor stream destroy failed."); - return GXF_FAILURE; - } - err_code = ::cvcore::tensor_ops::TensorContextFactory::DestroyContext(context_); - if (err_code != ::cvcore::make_error_code(::cvcore::ErrorCode::SUCCESS)) { - GXF_LOG_ERROR("tensor context destroy failed."); - return GXF_FAILURE; - } - return GXF_SUCCESS; -} - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorStream.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorStream.hpp deleted file mode 100644 index 710e11f..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/TensorStream.hpp +++ /dev/null @@ -1,59 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_TENSOR_STREAM_HPP -#define NVIDIA_CVCORE_TENSOR_STREAM_HPP - -#include "gxf/core/component.hpp" -#include "gxf/std/parameter_parser_std.hpp" - -#include "cv/tensor_ops/ITensorOperatorContext.h" -#include "cv/tensor_ops/ITensorOperatorStream.h" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// Wrapper of CVCORE ITensorOperatorStream/ITensorOperatorContext -class TensorStream : public gxf::Component { -public: - virtual ~TensorStream() = default; - TensorStream() = default; - - gxf_result_t registerInterface(gxf::Registrar* registrar) override; - gxf_result_t initialize() override; - gxf_result_t deinitialize() override; - - ::cvcore::tensor_ops::TensorOperatorContext getContext() const { - return context_; - } - ::cvcore::tensor_ops::TensorOperatorStream getStream() const { - return stream_; - } - -private: - gxf::Parameter backend_type_; - gxf::Parameter engine_type_; - - ::cvcore::tensor_ops::TensorOperatorContext context_; - ::cvcore::tensor_ops::TensorOperatorStream stream_; -}; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Undistort.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Undistort.cpp deleted file mode 100644 index b1eed4e..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Undistort.cpp +++ /dev/null @@ -1,285 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 - -#include - -#include "ImageUtils.hpp" -#include "Undistort.hpp" -#include "gxf/multimedia/camera.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -namespace detail { - -template<::cvcore::ImageType T> -gxf_result_t UndistortImpl(gxf::Entity& output, gxf::Entity& input, const ImageInfo& output_info, - const ImageInfo& input_info, const char* output_name, const char* input_name, - gxf::Handle stream, gxf::Handle output_adapter, - gxf::Handle input_adapter, gxf::Handle allocator, - ::cvcore::tensor_ops::ImageWarp warp, ::cvcore::tensor_ops::InterpolationType interp_type, - ::cvcore::tensor_ops::BorderType border_type) { - auto input_image = input_adapter->WrapImageFromMessage(input, input_name); - if (!input_image) { - return GXF_FAILURE; - } - - auto error = output_adapter->AddImageToMessage(output, output_info.width, output_info.height, allocator, - output_info.is_cpu, output_name); - if (error != GXF_SUCCESS) { - return GXF_FAILURE; - } - - auto output_image = output_adapter->WrapImageFromMessage(output, output_name); - if (!output_image) { - return GXF_FAILURE; - } - - auto err_code = stream->getStream()->Remap(output_image.value(), input_image.value(), warp, interp_type, border_type); - if (err_code != ::cvcore::make_error_condition(::cvcore::ErrorCode::SUCCESS)) { - GXF_LOG_ERROR("undistort operation failed."); - return GXF_FAILURE; - } - - return GXF_SUCCESS; -} - -gxf::Expected<::cvcore::CameraIntrinsics> GetIntrinsicsFromMessage(gxf::Handle& camera_model) { - return ::cvcore::CameraIntrinsics(camera_model->focal_length.x, camera_model->focal_length.y, - camera_model->principal_point.x, camera_model->principal_point.y, - camera_model->skew_value); -} - -gxf::Expected<::cvcore::CameraExtrinsics> GetExtrinsicsFromMessage(gxf::Handle& pose) { - float raw_matrix[3][4]; - for (size_t i = 0; i < 9; i++) { - raw_matrix[i / 3][i % 3] = pose->rotation[i]; - } - for (size_t i = 0; i < 3; i++) { - raw_matrix[i][3] = pose->translation[i]; - } - return ::cvcore::CameraExtrinsics(raw_matrix); -} - -gxf::Expected<::cvcore::CameraDistortionModel> GetDistortionsFromMessage(gxf::Handle& camera_model) { - auto distortion_type = GetCameraDistortionType(camera_model->distortion_type); - if (!distortion_type) { - return gxf::Unexpected{GXF_FAILURE}; - } - auto distortion_model = ::cvcore::CameraDistortionModel(); - for (size_t i = 0; i < 8; i++) { - distortion_model.coefficients[i] = camera_model->distortion_coefficients[i]; - } - distortion_model.type = distortion_type.value(); - return distortion_model; -} - -} // namespace detail - -gxf_result_t UndistortBase::start() { - // Load grid object - image_grid_.numHorizRegions = regions_width_.get().size(); - image_grid_.numVertRegions = regions_height_.get().size(); - if (regions_width_.get().size() != horizontal_intervals_.get().size() || - regions_height_.get().size() != vertical_intervals_.get().size()) { - GXF_LOG_ERROR("invalid 
image grid."); - return GXF_FAILURE; - } - std::copy(regions_width_.get().begin(), regions_width_.get().end(), image_grid_.regionWidth.begin()); - std::copy(regions_height_.get().begin(), regions_height_.get().end(), image_grid_.regionHeight.begin()); - std::copy(horizontal_intervals_.get().begin(), horizontal_intervals_.get().end(), image_grid_.horizInterval.begin()); - std::copy(vertical_intervals_.get().begin(), vertical_intervals_.get().end(), image_grid_.vertInterval.begin()); - output_shape_.x = static_cast( - std::accumulate(image_grid_.regionWidth.begin(), image_grid_.regionWidth.end(), 0)); - output_shape_.y = static_cast( - std::accumulate(image_grid_.regionHeight.begin(), image_grid_.regionHeight.end(), 0)); - - // Generate Image Warp if possible - if (input_camera_model_.try_get() && reference_frame_.try_get()) { - input_camera_info_ = {input_camera_model_.try_get().value()->getCameraIntrinsics(), - reference_frame_.try_get().value()->getCameraExtrinsics(), - input_camera_model_.try_get().value()->getDistortionModel()}; - - output_camera_intrinsics_ = output_camera_model_.try_get() - ? output_camera_model_.try_get().value()->getCameraIntrinsics() - : input_camera_info_.intrinsic; - - auto err_code = stream_->getStream()->GenerateWarpFromCameraModel(image_warp_, image_grid_, input_camera_info_, - output_camera_intrinsics_); - if (err_code != ::cvcore::make_error_condition(::cvcore::ErrorCode::SUCCESS)) { - GXF_LOG_ERROR("image warp creation failed."); - return GXF_FAILURE; - } - } - - return GXF_SUCCESS; -} - -gxf_result_t UndistortBase::stop() { - auto err_code = stream_->getStream()->DestroyWarp(image_warp_); - if (err_code != ::cvcore::make_error_condition(::cvcore::ErrorCode::SUCCESS)) { - GXF_LOG_ERROR("image warp de-allocation failed."); - return GXF_FAILURE; - } - return GXF_SUCCESS; -} - -gxf_result_t UndistortBase::registerInterface(gxf::Registrar* registrar) { - gxf::Expected result; - - result &= registrar->parameter(input_camera_model_, "input_camera_model", "", "", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(reference_frame_, "reference_frame", "", "", gxf::Registrar::NoDefaultParameter(), - GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(output_camera_model_, "output_camera_model", "", "", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(regions_width_, "regions_width"); - result &= registrar->parameter(regions_height_, "regions_height"); - result &= registrar->parameter(horizontal_intervals_, "horizontal_intervals"); - result &= registrar->parameter(vertical_intervals_, "vertical_intervals"); - result &= registrar->parameter(interp_type_, "interp_type"); - result &= registrar->parameter(border_type_, "border_type"); - result &= registrar->parameter(receiver_, "receiver"); - result &= registrar->parameter(transmitter_, "transmitter"); - result &= registrar->parameter(pool_, "pool"); - result &= registrar->parameter(stream_, "stream"); - result &= registrar->parameter(stream_pool_, "stream_pool", "cuda stream pool", "cuda stream pool object", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= registrar->parameter(input_adapter_, "input_adapter"); - result &= registrar->parameter(output_adapter_, "output_adapter"); - result &= registrar->parameter(input_name_, "input_name", "input name", "input tensor name", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - result &= 
registrar->parameter(output_name_, "output_name", "output name", "output tensor name", - gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); - - return gxf::ToResultCode(result); -} - -gxf::Expected UndistortBase::doInferOutputInfo(gxf::Entity& input) { - // Check if the input distortion type is Perpective - auto maybe_camera_message = input.get(); - if (maybe_camera_message) { - no_op_ = maybe_camera_message.value()->distortion_type == gxf::DistortionType::Perspective; - } - // Output size may vary, but the format must be the same - return ImageInfo{input_info_.type, static_cast(output_shape_.x), static_cast(output_shape_.y), - input_info_.is_cpu}; -} - -gxf_result_t UndistortBase::doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) { - *output = *input; - (*output).distortion_type = gxf::DistortionType::Perspective; - for (size_t i = 0; i < gxf::CameraModel::kMaxDistortionCoefficients; i++) { - (*output).distortion_coefficients[i] = 0.; - } - (*output).dimensions = output_shape_; - (*output).focal_length.x = output_camera_intrinsics_.fx(); - (*output).focal_length.y = output_camera_intrinsics_.fy(); - (*output).principal_point.x = output_camera_intrinsics_.cx(); - (*output).principal_point.y = output_camera_intrinsics_.cy(); - (*output).skew_value = output_camera_intrinsics_.skew(); - return GXF_SUCCESS; -} - -#define DEFINE_UNDISTORT(INPUT_TYPE) \ - if (input_info_.type == INPUT_TYPE) { \ - return detail::UndistortImpl(output, input, output_info_, input_info_, output_name, input_name, \ - stream_.get(), output_adapter_.get(), input_adapter_.get(), pool_.get(), \ - image_warp_, interp.value(), border.value()); \ - } - -gxf_result_t UndistortBase::doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, - const char* output_name, const char* input_name) { - GXF_LOG_INFO("execute undistort."); - auto maybe_camera_message = input.get(); - auto maybe_pose3d_message = input.get(); - if (!maybe_camera_message) { - if (image_warp_ == nullptr) { - GXF_LOG_ERROR("no camera information found."); - return GXF_FAILURE; - } - } else { - auto maybe_intrinsics = detail::GetIntrinsicsFromMessage(maybe_camera_message.value()); - auto maybe_distortions = detail::GetDistortionsFromMessage(maybe_camera_message.value()); - if (!maybe_intrinsics || !maybe_distortions) { - return GXF_FAILURE; - } - const auto& new_intrinsics = maybe_intrinsics.value(); - const auto& new_distortions = maybe_distortions.value(); - - auto new_extrinsics = maybe_pose3d_message ? detail::GetExtrinsicsFromMessage(maybe_pose3d_message.value()).value() - : ::cvcore::CameraExtrinsics(); - - const bool reset = image_warp_ == nullptr || new_intrinsics != input_camera_info_.intrinsic || - new_distortions != input_camera_info_.distortion || - new_extrinsics != input_camera_info_.extrinsic; - - if (reset) { - auto new_width = static_cast(output_shape_.x); - auto new_height = static_cast(output_shape_.y); - - // These two parameters (width_scale and height_scale) can be - // used to determine a crop or pad regime depending on which dimension to - // preserve in the case of keep_aspect ratio. In this case, we assume - // always_crop=True, or that we will always use the largest dimension - // change. 
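// [Editorial worked example, not part of the original patch; the image sizes are hypothetical.]
// Suppose the input is 1280x800 and output_shape_ is 640x480: width_scale = 640/1280 = 0.5,
// height_scale = 480/800 = 0.6, so scale = max(0.5, 0.6) = 0.6. Scaling the intrinsics by 0.6
// means the 480-pixel output still spans the full 800 input rows (480 / 0.6 = 800) while the
// 640-pixel output spans only ~1067 of the 1280 input columns, i.e. the horizontal excess is
// cropped - the always_crop behaviour described in the comment above.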
- auto width_scale = new_width / static_cast(input_info_.width); - auto height_scale = new_height / static_cast(input_info_.height); - auto scale = std::max({width_scale, height_scale}); // Always crop - - input_camera_info_ = {new_intrinsics, new_extrinsics, new_distortions}; - output_camera_intrinsics_ = new_intrinsics; - output_camera_intrinsics_.m_intrinsics[0][0] = scale * new_intrinsics.m_intrinsics[0][0]; - output_camera_intrinsics_.m_intrinsics[0][1] = scale * new_intrinsics.m_intrinsics[0][1]; - output_camera_intrinsics_.m_intrinsics[1][1] = scale * new_intrinsics.m_intrinsics[1][1]; - output_camera_intrinsics_.m_intrinsics[0][2] = scale * new_intrinsics.m_intrinsics[0][2]; - output_camera_intrinsics_.m_intrinsics[1][2] = scale * new_intrinsics.m_intrinsics[1][2]; - - auto err_code = stream_->getStream()->GenerateWarpFromCameraModel(image_warp_, image_grid_, input_camera_info_, - output_camera_intrinsics_); - if (err_code != ::cvcore::make_error_condition(::cvcore::ErrorCode::SUCCESS)) { - GXF_LOG_ERROR("image warp creation failed."); - return GXF_FAILURE; - } - } - } - - auto interp = GetInterpolationType(interp_type_); - if (!interp) { - return interp.error(); - } - auto border = GetBorderType(border_type_); - if (!border) { - return border.error(); - } - - // Run the image undistortion operation - DEFINE_UNDISTORT(::cvcore::ImageType::BGR_U8); - DEFINE_UNDISTORT(::cvcore::ImageType::RGB_U8); - DEFINE_UNDISTORT(::cvcore::ImageType::NV12); - DEFINE_UNDISTORT(::cvcore::ImageType::NV24); - - // Return error code for unsupported type - GXF_LOG_ERROR("invalid input/output type for image undistort."); - return GXF_FAILURE; -} - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Undistort.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Undistort.hpp deleted file mode 100644 index 377f621..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/Undistort.hpp +++ /dev/null @@ -1,69 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_UNDISTORT_HPP -#define NVIDIA_CVCORE_UNDISTORT_HPP - -#include "CameraModel.hpp" -#include "Frame3D.hpp" -#include "TensorOperator.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { - -// Undistort operator. 
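// [Editorial note, illustrative summary only - not part of the original patch.]
// The class below fills in the TensorOperator hooks implemented in Undistort.cpp above:
// doInferOutputInfo() marks the codelet as a no-op when the incoming gxf::CameraModel already
// reports gxf::DistortionType::Perspective, doUpdateCameraMessage() rewrites the output camera
// message with zeroed distortion coefficients and the rescaled intrinsics, and doExecute()
// regenerates the image warp only when the camera intrinsics, extrinsics, or distortion change
// between frames.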
-class UndistortBase : public TensorOperator { -public: - virtual ~UndistortBase() {} - - gxf_result_t start() override final; - gxf_result_t stop() override final; - - gxf_result_t registerInterface(gxf::Registrar* registrar) override; - -private: - gxf::Expected doInferOutputInfo(gxf::Entity& input) override final; - gxf_result_t doUpdateCameraMessage(gxf::Handle& output, - gxf::Handle& input) override final; - gxf_result_t doExecute(gxf::Entity& output, gxf::Entity& input, cudaStream_t stream, const char* output_name, - const char* input_name) override final; - - gxf::Parameter> input_camera_model_; - gxf::Parameter> reference_frame_; - gxf::Parameter> output_camera_model_; - gxf::Parameter> regions_width_; - gxf::Parameter> regions_height_; - gxf::Parameter> horizontal_intervals_; - gxf::Parameter> vertical_intervals_; - gxf::Parameter interp_type_; - gxf::Parameter border_type_; - - ::cvcore::tensor_ops::ImageGrid image_grid_; - ::cvcore::tensor_ops::ImageWarp image_warp_; - gxf::Vector2u output_shape_; - - ::cvcore::CameraModel input_camera_info_; - ::cvcore::CameraIntrinsics output_camera_intrinsics_; -}; - -class StreamUndistort : public UndistortBase {}; - -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterTensorImpl.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterTensorImpl.cpp deleted file mode 100644 index 37099fc..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterTensorImpl.cpp +++ /dev/null @@ -1,105 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// SPDX-License-Identifier: Apache-2.0 -#include "ImageAdapterTensorImpl.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { -namespace detail { - -gxf::Expected> GetHWCIndices(const ::cvcore::ImageType type) { - switch (type) { - case ::cvcore::ImageType::Y_U8: - case ::cvcore::ImageType::Y_U16: - case ::cvcore::ImageType::Y_F32: - case ::cvcore::ImageType::RGB_U8: - case ::cvcore::ImageType::BGR_U8: - case ::cvcore::ImageType::RGB_U16: - case ::cvcore::ImageType::BGR_U16: - case ::cvcore::ImageType::RGB_F32: - case ::cvcore::ImageType::BGR_F32: { - return std::make_tuple(0, 1, 2); - } - case ::cvcore::ImageType::PLANAR_RGB_U8: - case ::cvcore::ImageType::PLANAR_BGR_U8: - case ::cvcore::ImageType::PLANAR_RGB_U16: - case ::cvcore::ImageType::PLANAR_BGR_U16: - case ::cvcore::ImageType::PLANAR_RGB_F32: - case ::cvcore::ImageType::PLANAR_BGR_F32: { - return std::make_tuple(1, 2, 0); - } - default: { - GXF_LOG_ERROR("invalid image type."); - return gxf::Unexpected{GXF_FAILURE}; - } - } -} - -gxf::Expected GetPrimitiveType(const ::cvcore::ImageType image_type) { - switch (image_type) { - case ::cvcore::ImageType::Y_U8: - case ::cvcore::ImageType::RGB_U8: - case ::cvcore::ImageType::BGR_U8: - case ::cvcore::ImageType::PLANAR_RGB_U8: - case ::cvcore::ImageType::PLANAR_BGR_U8: { - return gxf::PrimitiveType::kUnsigned8; - } - case ::cvcore::ImageType::Y_U16: - case ::cvcore::ImageType::RGB_U16: - case ::cvcore::ImageType::BGR_U16: - case ::cvcore::ImageType::PLANAR_RGB_U16: - case ::cvcore::ImageType::PLANAR_BGR_U16: { - return gxf::PrimitiveType::kUnsigned16; - } - case ::cvcore::ImageType::Y_F32: - case ::cvcore::ImageType::RGB_F32: - case ::cvcore::ImageType::BGR_F32: - case ::cvcore::ImageType::PLANAR_RGB_F32: - case ::cvcore::ImageType::PLANAR_BGR_F32: { - return gxf::PrimitiveType::kFloat32; - } - default: { - GXF_LOG_ERROR("invalid image type."); - return gxf::Unexpected{GXF_FAILURE}; - } - } -} - -gxf::Expected GetTensorInfo(gxf::Handle tensor, const ::cvcore::ImageType type) { - const auto& shape = tensor->shape(); - const auto rank = tensor->rank(); - const auto storage_type = tensor->storage_type(); - - if (rank != 3) { - GXF_LOG_ERROR("unexpected tensor shape."); - return gxf::Unexpected{GXF_FAILURE}; - } - - const auto indices = GetHWCIndices(type); - if (!indices) { - return gxf::Unexpected{GXF_FAILURE}; - } - const size_t width = shape.dimension(std::get<1>(indices.value())); - const size_t height = shape.dimension(std::get<0>(indices.value())); - - return ImageInfo{type, width, height, storage_type != gxf::MemoryStorageType::kDevice}; -} - -} // namespace detail -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterTensorImpl.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterTensorImpl.hpp deleted file mode 100644 index 45a060a..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterTensorImpl.hpp +++ /dev/null @@ -1,105 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_TENSOR_ADAPTER_HPP -#define NVIDIA_CVCORE_TENSOR_ADAPTER_HPP - -#include "../ImageUtils.hpp" - -#include - -#include "cv/core/Image.h" -#include "gxf/std/allocator.hpp" -#include "gxf/std/tensor.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { -namespace detail { - -gxf::Expected> GetHWCIndices(const ::cvcore::ImageType type); - -gxf::Expected GetPrimitiveType(const ::cvcore::ImageType image_type); - -gxf::Expected GetTensorInfo(gxf::Handle tensor, const ::cvcore::ImageType type); - -template<::cvcore::ImageType T, - typename std::enable_if::type* = nullptr> -gxf::Expected<::cvcore::Image> WrapImageFromTensor(gxf::Handle tensor) { - const auto info = GetTensorInfo(tensor, T); - if (!info) { - return gxf::Unexpected{GXF_FAILURE}; - } - using D = typename ::cvcore::detail::ChannelTypeToNative<::cvcore::ImageTraits::CT>::Type; - auto pointer = tensor->data(); - if (!pointer) { - return gxf::Unexpected{GXF_FAILURE}; - } - const size_t stride = tensor->stride(std::get<0>(GetHWCIndices(T).value())); - return ::cvcore::Image(info.value().width, info.value().height, stride, pointer.value(), info.value().is_cpu); -} - -template<::cvcore::ImageType T, - typename std::enable_if::type* = nullptr> -gxf::Expected<::cvcore::Image> WrapImageFromTensor(gxf::Handle tensor) { - GXF_LOG_ERROR("NV12/NV24 not supported for gxf::Tensor"); - return gxf::Unexpected{GXF_FAILURE}; -} - -template<::cvcore::ImageType T, - typename std::enable_if::type* = nullptr> -gxf_result_t AllocateTensor(gxf::Handle tensor, size_t width, size_t height, - gxf::Handle allocator, bool is_cpu, bool allocate_pitch_linear) { - const auto primitive_type = GetPrimitiveType(T); - if (!primitive_type) { - return primitive_type.error(); - } - - const auto indices = GetHWCIndices(T); - if (!indices) { - return GXF_FAILURE; - } - std::array dims; - dims[std::get<0>(indices.value())] = height; - dims[std::get<1>(indices.value())] = width; - dims[std::get<2>(indices.value())] = ::cvcore::detail::ChannelToCount<::cvcore::ImageTraits::CC>(); - const gxf::Shape shape(dims, 3); - - auto result = - tensor->reshapeCustom(shape, primitive_type.value(), gxf::PrimitiveTypeSize(primitive_type.value()), - gxf::Unexpected{GXF_UNINITIALIZED_VALUE}, - is_cpu ? 
gxf::MemoryStorageType::kHost : gxf::MemoryStorageType::kDevice, allocator); - if (!result) { - GXF_LOG_ERROR("reshape tensor failed."); - return GXF_FAILURE; - } - return GXF_SUCCESS; -} - -template<::cvcore::ImageType T, - typename std::enable_if::type* = nullptr> -gxf_result_t AllocateTensor(gxf::Handle tensor, size_t width, size_t height, - gxf::Handle allocator, bool is_cpu, bool allocate_pitch_linear) { - GXF_LOG_ERROR("NV12/NV24 not supported for gxf::Tensor"); - return GXF_FAILURE; -} - -} // namespace detail -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterVideoBufferImpl.cpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterVideoBufferImpl.cpp deleted file mode 100644 index ac257b4..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterVideoBufferImpl.cpp +++ /dev/null @@ -1,88 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#include "ImageAdapterVideoBufferImpl.hpp" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { -namespace detail { - -gxf::Expected<::cvcore::ImageType> GetImageTypeFromVideoFormat(const gxf::VideoFormat format) { - switch (format) { - case gxf::VideoFormat::GXF_VIDEO_FORMAT_NV12: - case gxf::VideoFormat::GXF_VIDEO_FORMAT_NV12_ER: { - return ::cvcore::ImageType::NV12; - } - case gxf::VideoFormat::GXF_VIDEO_FORMAT_NV24: - case gxf::VideoFormat::GXF_VIDEO_FORMAT_NV24_ER: { - return ::cvcore::ImageType::NV24; - } - case gxf::VideoFormat::GXF_VIDEO_FORMAT_RGBA: { - return ::cvcore::ImageType::RGBA_U8; - } - case gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB: { - return ::cvcore::ImageType::RGB_U8; - } - case gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB32: { - return ::cvcore::ImageType::RGB_F32; - } - case gxf::VideoFormat::GXF_VIDEO_FORMAT_BGR: { - return ::cvcore::ImageType::BGR_U8; - } - case gxf::VideoFormat::GXF_VIDEO_FORMAT_BGR32: { - return ::cvcore::ImageType::BGR_F32; - } - case gxf::VideoFormat::GXF_VIDEO_FORMAT_R8_G8_B8: { - return ::cvcore::ImageType::PLANAR_RGB_U8; - } - case gxf::VideoFormat::GXF_VIDEO_FORMAT_R32_G32_B32: { - return ::cvcore::ImageType::PLANAR_RGB_F32; - } - case gxf::VideoFormat::GXF_VIDEO_FORMAT_B8_G8_R8: { - return ::cvcore::ImageType::PLANAR_BGR_U8; - } - case gxf::VideoFormat::GXF_VIDEO_FORMAT_B32_G32_R32: { - return ::cvcore::ImageType::PLANAR_BGR_F32; - } - case gxf::VideoFormat::GXF_VIDEO_FORMAT_GRAY: { - return ::cvcore::ImageType::Y_U8; - } - case gxf::VideoFormat::GXF_VIDEO_FORMAT_GRAY32: { - return ::cvcore::ImageType::Y_F32; - } - default: { - GXF_LOG_ERROR("invalid video format."); - return gxf::Unexpected{GXF_FAILURE}; - } - } -} - -gxf::Expected GetVideoBufferInfo(gxf::Handle video_buffer) { - const auto buffer_info = video_buffer->video_frame_info(); - const auto 
storage_type = video_buffer->storage_type(); - auto image_type = GetImageTypeFromVideoFormat(buffer_info.color_format); - if (!image_type) { - return gxf::Unexpected{GXF_FAILURE}; - } - return ImageInfo{image_type.value(), buffer_info.width, buffer_info.height, - storage_type != gxf::MemoryStorageType::kDevice}; -} - -} // namespace detail -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterVideoBufferImpl.hpp b/isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterVideoBufferImpl.hpp deleted file mode 100644 index 6664640..0000000 --- a/isaac_ros_ess/gxf/ess/extensions/tensor_ops/detail/ImageAdapterVideoBufferImpl.hpp +++ /dev/null @@ -1,294 +0,0 @@ -// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// SPDX-License-Identifier: Apache-2.0 -#ifndef NVIDIA_CVCORE_VIDEO_BUFFER_ADAPTER_HPP -#define NVIDIA_CVCORE_VIDEO_BUFFER_ADAPTER_HPP - -#include "../ImageUtils.hpp" - -#include "gxf/multimedia/video.hpp" -#include "gxf/std/allocator.hpp" - -#include "cv/core/Image.h" - -namespace nvidia { -namespace cvcore { -namespace tensor_ops { -namespace detail { - -gxf::Expected<::cvcore::ImageType> GetImageTypeFromVideoFormat(const gxf::VideoFormat format); - -gxf::Expected GetVideoBufferInfo(gxf::Handle video_buffer); - -template<::cvcore::ImageType T, - typename std::enable_if::type* = nullptr> -gxf::Expected<::cvcore::Image> WrapImageFromVideoBuffer(gxf::Handle video_buffer) { - const auto info = GetVideoBufferInfo(video_buffer); - if (!info) { - return gxf::Unexpected{GXF_FAILURE}; - } - using D = typename ::cvcore::detail::ChannelTypeToNative<::cvcore::ImageTraits::CT>::Type; - auto pointer = reinterpret_cast(video_buffer->pointer()); - if (!pointer) { - return gxf::Unexpected{GXF_FAILURE}; - } - const auto& color_planes = video_buffer->video_frame_info().color_planes; - return ::cvcore::Image(info.value().width, info.value().height, color_planes[0].stride, pointer, - info.value().is_cpu); -} - -template<::cvcore::ImageType T, - typename std::enable_if::type* = nullptr> -gxf::Expected<::cvcore::Image> WrapImageFromVideoBuffer(gxf::Handle video_buffer) { - const auto info = GetVideoBufferInfo(video_buffer); - if (!info) { - return gxf::Unexpected{GXF_FAILURE}; - } - // Note only U8 is supported in NV12/NV24 - auto pointer = reinterpret_cast(video_buffer->pointer()); - if (!pointer) { - return gxf::Unexpected{GXF_FAILURE}; - } - const auto& color_planes = video_buffer->video_frame_info().color_planes; - return ::cvcore::Image(info.value().width, info.value().height, color_planes[0].stride, color_planes[1].stride, - pointer, pointer + color_planes[1].offset, info.value().is_cpu); -} - -template<::cvcore::ImageType T> -struct ImageTypeToVideoFormat { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_CUSTOM; -}; - 
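// [Editorial note, illustrative only - not part of the original patch.]
// The primary template above falls back to GXF_VIDEO_FORMAT_CUSTOM; the explicit
// specializations that follow map each supported cvcore image type to its gxf::VideoFormat
// at compile time, so a lookup such as
//   static_assert(ImageTypeToVideoFormat<::cvcore::ImageType::RGB_U8>::format
//                     == gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB,
//                 "trait lookup resolved at compile time");
// resolves without any runtime dispatch.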
-template<> -struct ImageTypeToVideoFormat<::cvcore::ImageType::NV12> { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_NV12_ER; -}; - -template<> -struct ImageTypeToVideoFormat<::cvcore::ImageType::NV24> { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_NV24_ER; -}; - -template<> -struct ImageTypeToVideoFormat<::cvcore::ImageType::RGBA_U8> { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_RGBA; -}; - -template<> -struct ImageTypeToVideoFormat<::cvcore::ImageType::RGB_U8> { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB; -}; - -template<> -struct ImageTypeToVideoFormat<::cvcore::ImageType::RGB_F32> { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB32; -}; - -template<> -struct ImageTypeToVideoFormat<::cvcore::ImageType::BGR_U8> { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_BGR; -}; - -template<> -struct ImageTypeToVideoFormat<::cvcore::ImageType::BGR_F32> { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_BGR32; -}; - -template<> -struct ImageTypeToVideoFormat<::cvcore::ImageType::PLANAR_RGB_U8> { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_R8_G8_B8; -}; - -template<> -struct ImageTypeToVideoFormat<::cvcore::ImageType::PLANAR_RGB_F32> { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_R32_G32_B32; -}; - -template<> -struct ImageTypeToVideoFormat<::cvcore::ImageType::PLANAR_BGR_U8> { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_B8_G8_R8; -}; - -template<> -struct ImageTypeToVideoFormat<::cvcore::ImageType::PLANAR_BGR_F32> { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_B32_G32_R32; -}; - -template<> -struct ImageTypeToVideoFormat<::cvcore::ImageType::Y_U8> { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_GRAY; -}; - -template<> -struct ImageTypeToVideoFormat<::cvcore::ImageType::Y_F32> { - static constexpr gxf::VideoFormat format = gxf::VideoFormat::GXF_VIDEO_FORMAT_GRAY32; -}; - -template<::cvcore::ImageType T> -struct DefaultNoPaddingColorPlanes {}; - -template<> -struct DefaultNoPaddingColorPlanes<::cvcore::ImageType::NV12> { - DefaultNoPaddingColorPlanes(size_t width) - : planes({gxf::ColorPlane("Y", 1, width), gxf::ColorPlane("UV", 2, width)}) {} - std::array planes; -}; - -template<> -struct DefaultNoPaddingColorPlanes<::cvcore::ImageType::NV24> { - DefaultNoPaddingColorPlanes(size_t width) - : planes({gxf::ColorPlane("Y", 1, width), gxf::ColorPlane("UV", 2, width * 2)}) {} - std::array planes; -}; - -template<> -struct DefaultNoPaddingColorPlanes<::cvcore::ImageType::RGBA_U8> { - DefaultNoPaddingColorPlanes(size_t width) - : planes({gxf::ColorPlane("RGBA", 4, width * 4)}) {} - std::array planes; -}; - -template<> -struct DefaultNoPaddingColorPlanes<::cvcore::ImageType::RGB_U8> { - DefaultNoPaddingColorPlanes(size_t width) - : planes({gxf::ColorPlane("RGB", 3, width * 3)}) {} - std::array planes; -}; - -template<> -struct DefaultNoPaddingColorPlanes<::cvcore::ImageType::RGB_F32> { - DefaultNoPaddingColorPlanes(size_t width) - : planes({gxf::ColorPlane("RGB", 12, width * 12)}) {} - std::array planes; -}; - -template<> -struct DefaultNoPaddingColorPlanes<::cvcore::ImageType::BGR_U8> { - DefaultNoPaddingColorPlanes(size_t width) - : planes({gxf::ColorPlane("BGR", 3, width * 3)}) {} - 
std::array planes; -}; - -template<> -struct DefaultNoPaddingColorPlanes<::cvcore::ImageType::BGR_F32> { - DefaultNoPaddingColorPlanes(size_t width) - : planes({gxf::ColorPlane("BGR", 12, width * 12)}) {} - std::array planes; -}; - -template<> -struct DefaultNoPaddingColorPlanes<::cvcore::ImageType::PLANAR_RGB_U8> { - DefaultNoPaddingColorPlanes(size_t width) - : planes({gxf::ColorPlane("R", 1, width), gxf::ColorPlane("G", 1, width), gxf::ColorPlane("B", 1, width)}) {} - std::array planes; -}; - -template<> -struct DefaultNoPaddingColorPlanes<::cvcore::ImageType::PLANAR_RGB_F32> { - DefaultNoPaddingColorPlanes(size_t width) - : planes( - {gxf::ColorPlane("R", 4, width * 4), gxf::ColorPlane("G", 4, width * 4), gxf::ColorPlane("B", 4, width * 4)}) {} - std::array planes; -}; - -template<> -struct DefaultNoPaddingColorPlanes<::cvcore::ImageType::PLANAR_BGR_U8> { - DefaultNoPaddingColorPlanes(size_t width) - : planes({gxf::ColorPlane("B", 1, width), gxf::ColorPlane("G", 1, width), gxf::ColorPlane("R", 1, width)}) {} - std::array planes; -}; - -template<> -struct DefaultNoPaddingColorPlanes<::cvcore::ImageType::PLANAR_BGR_F32> { - DefaultNoPaddingColorPlanes(size_t width) - : planes( - {gxf::ColorPlane("B", 4, width * 4), gxf::ColorPlane("G", 4, width * 4), gxf::ColorPlane("R", 4, width * 4)}) {} - std::array planes; -}; - -template<> -struct DefaultNoPaddingColorPlanes<::cvcore::ImageType::Y_U8> { - DefaultNoPaddingColorPlanes(size_t width) - : planes({gxf::ColorPlane("gray", 1, width)}) {} - std::array planes; -}; - -template<> -struct DefaultNoPaddingColorPlanes<::cvcore::ImageType::Y_F32> { - DefaultNoPaddingColorPlanes(size_t width) - : planes({gxf::ColorPlane("gray", 4, width * 4)}) {} - std::array planes; -}; - -// This include the list of image types that GXF supported so far -constexpr bool IsValidGXFImageType(const ::cvcore::ImageType type) { - return type == ::cvcore::ImageType::NV12 || type == ::cvcore::ImageType::NV24 || - type == ::cvcore::ImageType::RGBA_U8 || type == ::cvcore::ImageType::RGB_U8 || - type == ::cvcore::ImageType::BGR_U8 || type == ::cvcore::ImageType::RGB_F32 || - type == ::cvcore::ImageType::BGR_F32 || type == ::cvcore::ImageType::PLANAR_RGB_U8 || - type == ::cvcore::ImageType::PLANAR_BGR_U8 || type == ::cvcore::ImageType::PLANAR_RGB_F32 || - type == ::cvcore::ImageType::PLANAR_BGR_F32 || type == ::cvcore::ImageType::Y_U8 || - type == ::cvcore::ImageType::Y_F32; -} - -template<::cvcore::ImageType T, typename std::enable_if::type* = nullptr> -gxf_result_t AllocateVideoBuffer(gxf::Handle video_buffer, size_t width, size_t height, - gxf::Handle allocator, bool is_cpu, bool allocate_pitch_linear) { - if (width % 2 != 0 || height % 2 != 0) { - GXF_LOG_ERROR("image width/height must be even for creation of gxf::VideoBuffer"); - return GXF_FAILURE; - } - if (allocate_pitch_linear) { - auto result = video_buffer->resize::format>( - static_cast(width), static_cast(height), gxf::SurfaceLayout::GXF_SURFACE_LAYOUT_PITCH_LINEAR, - is_cpu ? 
gxf::MemoryStorageType::kHost : gxf::MemoryStorageType::kDevice, allocator); - - if (!result) { - GXF_LOG_ERROR("resize VideoBuffer failed."); - return GXF_FAILURE; - } - } else { - DefaultNoPaddingColorPlanes planes_trait(width); - gxf::VideoFormatSize::format> buffer_type_trait; - uint64_t size = buffer_type_trait.size(width, height, planes_trait.planes); - std::vector planes_filled{planes_trait.planes.begin(), planes_trait.planes.end()}; - gxf::VideoBufferInfo buffer_info{static_cast(width), static_cast(height), - ImageTypeToVideoFormat::format, planes_filled, - gxf::SurfaceLayout::GXF_SURFACE_LAYOUT_PITCH_LINEAR}; - auto result = video_buffer->resizeCustom( - buffer_info, size, is_cpu ? gxf::MemoryStorageType::kHost : gxf::MemoryStorageType::kDevice, allocator); - - if (!result) { - GXF_LOG_ERROR("custom resize VideoBuffer failed."); - return GXF_FAILURE; - } - } - return GXF_SUCCESS; -} - -template<::cvcore::ImageType T, typename std::enable_if::type* = nullptr> -gxf_result_t AllocateVideoBuffer(gxf::Handle video_buffer, size_t width, size_t height, - gxf::Handle allocator, bool is_cpu, bool allocate_pitch_linear) { - GXF_LOG_ERROR("image type not supported in gxf::VideoBuffer"); - return GXF_FAILURE; -} - -} // namespace detail -} // namespace tensor_ops -} // namespace cvcore -} // namespace nvidia - -#endif diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/Errors.cpp b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/Errors.cpp similarity index 86% rename from isaac_ros_ess/gxf/ess/cvcore/src/inferencer/Errors.cpp rename to isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/Errors.cpp index f6135a9..2fe26d6 100644 --- a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/Errors.cpp +++ b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/Errors.cpp @@ -15,34 +15,31 @@ // // SPDX-License-Identifier: Apache-2.0 -#include "cv/inferencer/Errors.h" +#include +#include "gems/dnn_inferencer/inferencer/Errors.h" #ifndef __cpp_lib_to_underlying namespace std { template -constexpr underlying_type_t to_underlying(Enum e) noexcept -{ +constexpr underlying_type_t to_underlying(Enum e) noexcept { return static_cast>(e); } }; // namespace std -#endif // __cpp_lib_to_underlying +#endif // __cpp_lib_to_underlying -namespace cvcore { namespace inferencer { +namespace cvcore { +namespace inferencer { namespace detail { -struct InferencerErrorCategory : std::error_category -{ - virtual const char *name() const noexcept override final - { +struct InferencerErrorCategory : std::error_category { + const char* name() const noexcept final { return "cvcore-inferencer-error"; } - virtual std::string message(int value) const override final - { + std::string message(int value) const final { std::string result; - switch (value) - { + switch (value) { case std::to_underlying(InferencerErrorCode::SUCCESS): result = "(SUCCESS) No errors detected"; break; @@ -53,10 +50,12 @@ struct InferencerErrorCategory : std::error_category result = "(INVALID_OPERATION) Invalid operation performed"; break; case std::to_underlying(InferencerErrorCode::TRITON_SERVER_NOT_READY): - result = "(TRITON_SERVER_NOT_READY) Triton server is not live or the serverUrl is incorrect"; + result = "(TRITON_SERVER_NOT_READY) Triton server is not live or the serverUrl" + "is incorrect"; break; case std::to_underlying(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR): - result = "(TRITON_CUDA_SHARED_MEMORY_ERROR) Unable to map/unmap cuda shared memory for triton server"; + result = "(TRITON_CUDA_SHARED_MEMORY_ERROR) Unable to map/unmap 
cuda shared memory" + "for triton server"; break; case std::to_underlying(InferencerErrorCode::TRITON_INFERENCE_ERROR): result = "(TRITON_INFERENCE_ERROR) Error during inference using triton API"; @@ -77,12 +76,11 @@ struct InferencerErrorCategory : std::error_category return result; } - virtual std::error_condition default_error_condition(int code) const noexcept override final - { + std::error_condition default_error_condition(int code) const noexcept final { std::error_condition result; + using ErrorCode = cvcore::tensor_ops::ErrorCode; - switch (code) - { + switch (code) { case std::to_underlying(InferencerErrorCode::SUCCESS): result = ErrorCode::SUCCESS; break; @@ -118,12 +116,13 @@ struct InferencerErrorCategory : std::error_category return result; } }; -} // namespace detail +} // namespace detail const detail::InferencerErrorCategory errorCategory{}; -std::error_code make_error_code(InferencerErrorCode ec) noexcept -{ +std::error_code make_error_code(InferencerErrorCode ec) noexcept { return {std::to_underlying(ec), errorCategory}; } -}} // namespace cvcore::inferencer + +} // namespace inferencer +} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/inferencer/Errors.h b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/Errors.h similarity index 77% rename from isaac_ros_ess/gxf/ess/cvcore/include/cv/inferencer/Errors.h rename to isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/Errors.h index c569f10..39da91f 100644 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/inferencer/Errors.h +++ b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/Errors.h @@ -15,18 +15,17 @@ // // SPDX-License-Identifier: Apache-2.0 -#ifndef CVCORE_ERRORS_H -#define CVCORE_ERRORS_H +#pragma once -#include "cv/core/CVError.h" +#include "extensions/tensorops/core/CVError.h" -namespace cvcore { namespace inferencer { +namespace cvcore { +namespace inferencer { /* * Enum class describing the inference error codes. 
*/ -enum class InferencerErrorCode : std::int32_t -{ +enum class InferencerErrorCode : std::int32_t { SUCCESS = 0, INVALID_ARGUMENT, INVALID_OPERATION, @@ -36,23 +35,24 @@ enum class InferencerErrorCode : std::int32_t TRITON_INFERENCE_ERROR, TRITON_REGISTER_LAYER_ERROR, TENSORRT_INFERENCE_ERROR, + TENSORRT_ENGINE_ERROR }; -}} // namespace cvcore::inferencer +} // namespace inferencer +} // namespace cvcore namespace std { template<> -struct is_error_code_enum : true_type -{ +struct is_error_code_enum : true_type { }; -} // namespace std +} // namespace std -namespace cvcore { namespace inferencer { +namespace cvcore { +namespace inferencer { std::error_code make_error_code(InferencerErrorCode) noexcept; -}} // namespace cvcore::inferencer - -#endif // CVCORE_ERRORS_H +} // namespace inferencer +} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/inferencer/IInferenceBackend.h b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/IInferenceBackend.h similarity index 71% rename from isaac_ros_ess/gxf/ess/cvcore/include/cv/inferencer/IInferenceBackend.h rename to isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/IInferenceBackend.h index 7213a06..9212e5e 100644 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/inferencer/IInferenceBackend.h +++ b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/IInferenceBackend.h @@ -15,8 +15,7 @@ // // SPDX-License-Identifier: Apache-2.0 -#ifndef CVCORE_IINFERENCEBACKEND_H -#define CVCORE_IINFERENCEBACKEND_H +#pragma once #include #include @@ -26,38 +25,37 @@ #include #include -#include "cv/core/Tensor.h" +#include +#include "extensions/tensorops/core/Tensor.h" -namespace cvcore { namespace inferencer { +namespace cvcore { +namespace inferencer { /** * Struct type to describe input and output layers. */ -struct LayerInfo -{ +struct LayerInfo { size_t index; /**< Block Index of layer */ std::string name; /**< Name of layer */ std::vector shape; /**< Shape of layer */ - cvcore::ChannelType dataType; /**< Datatype of layer */ - cvcore::TensorLayout layout; /**< Tensor layour of layer */ + cvcore::tensor_ops::ChannelType dataType; /**< Datatype of layer */ + cvcore::tensor_ops::TensorLayout layout; /**< Tensor layour of layer */ size_t layerSize; }; /** * Enum class to describe the backend protocol for triton */ -enum class BackendProtocol -{ - GRPC, /**< GRPC protocol */ - HTTP /**< HTTP Protocol */ +enum class BackendProtocol { + GRPC, + HTTP }; /** * Struct type to describe the input for triton inference. */ -struct TritonRemoteInferenceParams -{ - std::string serverUrl; /**< Server url created by running the triton server for a give model path */ +struct TritonRemoteInferenceParams { + std::string serverUrl; /**< Server url created by running the triton server */ bool verbose; /**< Verbose log from backend */ BackendProtocol protocolType; /**< Backend protocol type */ std::string modelName; /**< Model name as per model respoitory */ @@ -67,11 +65,10 @@ struct TritonRemoteInferenceParams /** * Struct type to describe the model metadata parsed by the inference backend. 
*/ -struct ModelMetaData -{ +struct ModelMetaData { std::string modelName; /**< Model name */ std::string modelVersion; /**< Model version */ - std::unordered_map inputLayers; /**< Map of input layer information indexed by layer name */ + std::unordered_map inputLayers; /**< Map of input layer */ std::unordered_map outputLayers; /**< Map of output layer information indexed by layer name */ size_t maxBatchSize; /**< Maximum batch size */ @@ -80,8 +77,7 @@ struct ModelMetaData /** * Enum type for TensorRT inference type */ -enum class TRTInferenceType -{ +enum class TRTInferenceType { TRT_ENGINE, /**< Inference using TRT engine file */ TRT_ENGINE_IN_MEMORY /**< Inference using TRT Cuda Engine */ }; @@ -89,26 +85,37 @@ enum class TRTInferenceType /** * TRT Logger */ -class TRTLogger : public nvinfer1::ILogger -{ -public: - void log(Severity severity, const char *msg) noexcept - { - if ((severity == Severity::kERROR) || (severity == Severity::kWARNING)) - { +class TRTLogger : public nvinfer1::ILogger { + public: + void log(Severity severity, const char* msg) noexcept { + if ((severity == Severity::kERROR) || (severity == Severity::kWARNING)) { std::cerr << msg << std::endl; } } }; +/** + * Enum class to describe the model build flags + */ +enum OnnxModelBuildFlag { + NONE = 0, + kINT8 = 1, + kFP16 = 2, + kGPU_FALLBACK = 4, +}; + /** * Struct type to describe the input for triton inference. */ -struct TensorRTInferenceParams -{ +struct TensorRTInferenceParams { TRTInferenceType inferType; /**< TensorRT inference type */ - nvinfer1::ICudaEngine *engine; /**< Cuda engine file for TRT_ENGINE_IN_MEMORY type. Nullptr if TRT_ENGINE is used */ - std::string engineFilePath; /**< Engine file path for TRT_ENGINE type. Set to null otherwise */ + nvinfer1::ICudaEngine* engine; /**< Cuda engine file for TRT_ENGINE_IN_MEMORY type. */ + // Nullptr if TRT_ENGINE is used + std::string onnxFilePath; /**< ONNX model file path. */ + std::string engineFilePath; /**< Engine file path for TRT_ENGINE type. */ + bool force_engine_update; + int32_t buildFlags; + int64_t max_workspace_size; std::size_t maxBatchSize; /**< Max Batch size */ std::vector inputLayerNames; /** Input layer names */ std::vector outputLayerNames; /** Output layer names */ @@ -118,9 +125,8 @@ struct TensorRTInferenceParams /** * Interface for TritonRemote , Triton C and Native TensorRT inference backend. */ -class IInferenceBackendClient -{ -public: +class IInferenceBackendClient { + public: virtual ~IInferenceBackendClient() noexcept = default; /** @@ -129,7 +135,8 @@ class IInferenceBackendClient * @param inputLayerName Input Layer name * @return error code */ - virtual std::error_code setInput(const cvcore::TensorBase &trtInputBuffer, std::string inputLayerName) = 0; + virtual std::error_code setInput(const cvcore::tensor_ops::TensorBase& trtInputBuffer, + std::string inputLayerName) = 0; /** * Function to set output layer data @@ -137,7 +144,8 @@ class IInferenceBackendClient * @param outputLayerName Output Layer name * @return error code */ - virtual std::error_code setOutput(cvcore::TensorBase &trtOutputBuffer, std::string outputLayerName) = 0; + virtual std::error_code setOutput(cvcore::tensor_ops::TensorBase& trtOutputBuffer, + std::string outputLayerName) = 0; /** * Returns the model metadata parsed by the inference backend. @@ -147,7 +155,7 @@ class IInferenceBackendClient /** * Inference based on input and output set. enqueueV2 for TensorRT and inferSync for Triton. - * @param Batch size of input for inference. Default set to 1. 
Must be <= Max Batch Size used for buffers. + * @param Batch size of input for inference. Default set to 1. Must be <= Max Batch Size . * @return error code */ virtual std::error_code infer(size_t batchSize = 1) = 0; @@ -157,7 +165,7 @@ class IInferenceBackendClient * @param cudaStream_t cuda input stream * @return error code */ - virtual std::error_code setCudaStream(cudaStream_t) = 0; // Only in TRT + virtual std::error_code setCudaStream(cudaStream_t) = 0; /** * Unregisters the tensor mapped from the inference backend @@ -172,14 +180,15 @@ class IInferenceBackendClient */ virtual std::error_code unregister() = 0; -protected: + protected: IInferenceBackendClient() = default; - IInferenceBackendClient(const IInferenceBackendClient &) = default; - IInferenceBackendClient &operator=(const IInferenceBackendClient &) = default; + IInferenceBackendClient(const IInferenceBackendClient&) = default; + IInferenceBackendClient& operator=(const IInferenceBackendClient&) = default; IInferenceBackendClient(IInferenceBackendClient &&) noexcept = default; - IInferenceBackendClient &operator=(IInferenceBackendClient &&) noexcept = default; + IInferenceBackendClient& operator=(IInferenceBackendClient &&) noexcept = default; }; using InferenceBackendClient = IInferenceBackendClient *; -}} // namespace cvcore::inferencer -#endif + +} // namespace inferencer +} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/Inferencer.cpp b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/Inferencer.cpp similarity index 69% rename from isaac_ros_ess/gxf/ess/cvcore/src/inferencer/Inferencer.cpp rename to isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/Inferencer.cpp index 2ce6a2d..ba4a590 100644 --- a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/Inferencer.cpp +++ b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/Inferencer.cpp @@ -15,16 +15,22 @@ // // SPDX-License-Identifier: Apache-2.0 -#include "cv/inferencer/Inferencer.h" +#include "gems/dnn_inferencer/inferencer/Inferencer.h" #include +#include +#include +#include -#include "cv/inferencer/Errors.h" -#include "cv/inferencer/IInferenceBackend.h" -#include "tensorrt/TensorRTInferencer.h" -#include "triton/TritonGrpcInferencer.h" +#include "Errors.h" +#include "IInferenceBackend.h" +#include "TensorRTInferencer.h" +#include "TritonGrpcInferencer.h" + +namespace cvcore { +namespace inferencer { +using ErrorCode = cvcore::tensor_ops::ErrorCode; -namespace cvcore { namespace inferencer { std::mutex InferenceBackendFactory::inferenceMutex; #ifdef ENABLE_TRITON @@ -32,52 +38,42 @@ std::unordered_map> InferenceBackendFactory::tritonRemoteMap; std::error_code InferenceBackendFactory::CreateTritonRemoteInferenceBackendClient( - InferenceBackendClient &client, const TritonRemoteInferenceParams ¶ms) -{ + InferenceBackendClient& client, const TritonRemoteInferenceParams& params) { std::lock_guard instanceLock(inferenceMutex); - if (params.protocolType == BackendProtocol::HTTP) - { + if (params.protocolType == BackendProtocol::HTTP) { return ErrorCode::NOT_IMPLEMENTED; } std::error_code result = ErrorCode::SUCCESS; std::string hashString = params.serverUrl + params.modelName + params.modelVersion; - try - { - if (tritonRemoteMap.find(hashString) != tritonRemoteMap.end()) - { + try { + if (tritonRemoteMap.find(hashString) != tritonRemoteMap.end()) { client = tritonRemoteMap[hashString].second; tritonRemoteMap[hashString].first++; - } - else - { + } else { tritonRemoteMap[hashString] = - std::make_pair(1, new TritonGrpcInferencer(params)); + 
std::pair(1, + new TritonGrpcInferencer(params)); } } - catch (std::error_code &e) - { + catch (std::error_code &e) { result = e; } - catch (...) - { + catch (...) { result = ErrorCode::INVALID_ARGUMENT; } client = tritonRemoteMap[hashString].second; return result; } -std::error_code InferenceBackendFactory::DestroyTritonRemoteInferenceBackendClient(InferenceBackendClient &client) -{ +std::error_code InferenceBackendFactory::DestroyTritonRemoteInferenceBackendClient( + InferenceBackendClient& client) { std::lock_guard instanceLock(inferenceMutex); - for (auto &it : tritonRemoteMap) - { - if (it.second.second == client) - { + for (auto &it : tritonRemoteMap) { + if (it.second.second == client) { it.second.first--; - if (it.second.first == 0) - { + if (it.second.first == 0) { tritonRemoteMap.erase(it.first); client->unregister(); delete client; @@ -91,33 +87,28 @@ std::error_code InferenceBackendFactory::DestroyTritonRemoteInferenceBackendClie } #endif -std::error_code InferenceBackendFactory::CreateTensorRTInferenceBackendClient(InferenceBackendClient &client, - const TensorRTInferenceParams ¶ms) -{ +std::error_code InferenceBackendFactory::CreateTensorRTInferenceBackendClient( + InferenceBackendClient& client, const TensorRTInferenceParams& params) { std::lock_guard instanceLock(inferenceMutex); std::error_code result = ErrorCode::SUCCESS; - try - { + try { client = new TensorRTInferencer(params); } - catch (std::error_code &e) - { + catch (std::error_code &e) { result = e; } - catch (...) - { + catch (...) { result = ErrorCode::INVALID_ARGUMENT; } return result; } -std::error_code InferenceBackendFactory::DestroyTensorRTInferenceBackendClient(InferenceBackendClient &client) -{ +std::error_code InferenceBackendFactory::DestroyTensorRTInferenceBackendClient( + InferenceBackendClient& client) { std::lock_guard instanceLock(inferenceMutex); - if (client != nullptr) - { + if (client != nullptr) { client->unregister(); delete client; client = nullptr; @@ -126,5 +117,5 @@ std::error_code InferenceBackendFactory::DestroyTensorRTInferenceBackendClient(I return ErrorCode::SUCCESS; } - -}} // namespace cvcore::inferencer +} // namespace inferencer +} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/cvcore/include/cv/inferencer/Inferencer.h b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/Inferencer.h similarity index 69% rename from isaac_ros_ess/gxf/ess/cvcore/include/cv/inferencer/Inferencer.h rename to isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/Inferencer.h index ad61131..d07de15 100644 --- a/isaac_ros_ess/gxf/ess/cvcore/include/cv/inferencer/Inferencer.h +++ b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/Inferencer.h @@ -15,65 +15,72 @@ // // SPDX-License-Identifier: Apache-2.0 -#ifndef CVCORE_INFERENCER_H -#define CVCORE_INFERENCER_H +#pragma once #include #include +#include #include +#include -#include "cv/core/Tensor.h" -#include "cv/inferencer/IInferenceBackend.h" +#include "extensions/tensorops/core/Tensor.h" +#include "IInferenceBackend.h" -namespace cvcore { namespace inferencer { +namespace cvcore { +namespace inferencer { /** * A class to create and destroy a client for a given inference backend type */ -class InferenceBackendFactory -{ -public: +class InferenceBackendFactory { + public: #ifdef ENABLE_TRITON /** - * Function to create client for Triton remote inference backend based on the Triton remote inference paramaters. + * Function to create client for Triton remote inference backend based on the Triton remote + * inference paramaters. 
* @param client client object created * @param Triton remote inference config parameters. * @return error code */ - static std::error_code CreateTritonRemoteInferenceBackendClient(InferenceBackendClient &client, - const TritonRemoteInferenceParams &); + static std::error_code CreateTritonRemoteInferenceBackendClient(InferenceBackendClient& client, + const TritonRemoteInferenceParams&); /** * Function to Destroy the triton grpc client * @param client client object created * @return error code */ - static std::error_code DestroyTritonRemoteInferenceBackendClient(InferenceBackendClient &client); + static std::error_code DestroyTritonRemoteInferenceBackendClient( + InferenceBackendClient& client); #endif /** - * Function to create client for TensorRT inference backend based on the TensorRT inference paramaters. + * Function to create client for TensorRT inference backend based on the TensorRT + * inference paramaters. * @param client client object created * @param TensorRT inference config parameters. * @return error code */ - static std::error_code CreateTensorRTInferenceBackendClient(InferenceBackendClient &client, - const TensorRTInferenceParams &); + static std::error_code CreateTensorRTInferenceBackendClient(InferenceBackendClient& client, + const TensorRTInferenceParams&); /** * Function to Destroy the tensorrt client * @param client client object created * @return error code */ - static std::error_code DestroyTensorRTInferenceBackendClient(InferenceBackendClient &client); + static std::error_code DestroyTensorRTInferenceBackendClient(InferenceBackendClient& client); -private: + private: #ifdef ENABLE_TRITON - // Keeps track of any changes in url/model repo path and returns an existing / new client instance. - static std::unordered_map> tritonRemoteMap; + // Keeps track of any changes in url/model repo path and returns an existing / new client + // instance. + static std::unordered_map> tritonRemoteMap; #endif static std::mutex inferenceMutex; }; -}} // namespace cvcore::inferencer -#endif + +} // namespace inferencer +} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTInferencer.cpp b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTInferencer.cpp new file mode 100644 index 0000000..62f9c05 --- /dev/null +++ b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTInferencer.cpp @@ -0,0 +1,387 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 +#include "gems/dnn_inferencer/inferencer/TensorRTInferencer.h" +#include +#include +#include +#include +#include +#include + +#include +#include "gems/dnn_inferencer/inferencer/Errors.h" +#include "gems/dnn_inferencer/inferencer/IInferenceBackend.h" +#include "gems/dnn_inferencer/inferencer/Inferencer.h" +#include "gems/dnn_inferencer/inferencer/TensorRTUtils.h" +#include "gxf/core/expected.hpp" + +namespace cvcore { +namespace inferencer { + +namespace { +// using namespace cvcore::tensor_ops; +using TensorBase = cvcore::tensor_ops::TensorBase; +using ChannelType = cvcore::tensor_ops::ChannelType; +using ChannelCount = cvcore::tensor_ops::ChannelCount; +using ErrorCode = cvcore::tensor_ops::ErrorCode; +size_t getDataSize(const std::vector& shape, ChannelType dataType) { + size_t layerShape = 1; + for (size_t k = 0; k < shape.size(); k++) + layerShape *= shape[k] <= 0 ? 1 : shape[k]; + + return layerShape * GetChannelSize(dataType); +} +} // namespace + +std::error_code TensorRTInferencer::getLayerInfo(LayerInfo& layer, std::string layerName) { + layer.name = layerName; + layer.index = m_inferenceEngine->getBindingIndex(layerName.c_str()); + auto dim = m_inferenceEngine->getBindingDimensions(layer.index); + nvinfer1::TensorFormat tensorFormat = m_inferenceEngine->getBindingFormat(layer.index); + + std::error_code err; + err = getCVCoreChannelLayoutFromTensorRT(layer.layout, tensorFormat); + if (err != make_error_code(ErrorCode::SUCCESS)) { + return ErrorCode::INVALID_ARGUMENT; + } + + for (int32_t cnt = 0; cnt < dim.nbDims; cnt++) { + layer.shape.push_back(dim.d[cnt]); + } + + err = getCVCoreChannelTypeFromTensorRT(layer.dataType, + m_inferenceEngine->getBindingDataType(layer.index)); + layer.layerSize = getDataSize(layer.shape, layer.dataType); + if (err != make_error_code(ErrorCode::SUCCESS)) { + return ErrorCode::INVALID_ARGUMENT; + } + + return ErrorCode::SUCCESS; +} + +std::error_code TensorRTInferencer::ParseTRTModel() { + m_modelInfo.modelName = m_inferenceEngine->getName(); + m_modelInfo.modelVersion = ""; + m_modelInfo.maxBatchSize = m_maxBatchSize; + std::error_code err; + for (size_t i = 0; i < m_inputLayers.size(); i++) { + LayerInfo layer; + err = getLayerInfo(layer, m_inputLayers[i]); + if (err != make_error_code(ErrorCode::SUCCESS)) { + return err; + } + m_modelInfo.inputLayers[layer.name] = layer; + } + for (size_t i = 0; i < m_outputLayers.size(); i++) { + LayerInfo layer; + err = getLayerInfo(layer, m_outputLayers[i]); + if (err != make_error_code(ErrorCode::SUCCESS)) { + return err; + } + m_modelInfo.outputLayers[layer.name] = layer; + } + + return ErrorCode::SUCCESS; +} + +std::error_code TensorRTInferencer::convertModelToEngine(int32_t dla_core, + const char* model_file, int64_t max_workspace_size, int32_t buildFlags, + std::size_t max_batch_size) { + GXF_LOG_INFO("Convert to engine from onnx file: %s", model_file); + // Creates the engine Builder + std::unique_ptr builder(nvinfer1::createInferBuilder(*(m_logger.get()))); + + // Builder Config provides options to the Builder + std::unique_ptr builderConfig(builder->createBuilderConfig()); + builderConfig->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE, max_workspace_size); + + if (dla_core >= 0) { + builderConfig->setDefaultDeviceType(nvinfer1::DeviceType::kDLA); + builderConfig->setDLACore(dla_core); + } + if (buildFlags & kINT8) { + builderConfig->setFlag(nvinfer1::BuilderFlag::kINT8); + builderConfig->setInt8Calibrator(nullptr); + } + if (buildFlags & 
OnnxModelBuildFlag::kFP16) { + builderConfig->setFlag(nvinfer1::BuilderFlag::kFP16); + } + + if (buildFlags & kGPU_FALLBACK) { + builderConfig->setFlag(nvinfer1::BuilderFlag::kGPU_FALLBACK); + } + + // Parses ONNX with explicit batch size for support of dynamic shapes/batch + std::unique_ptr network(builder->createNetworkV2( + 1U << static_cast(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH))); + + std::unique_ptr onnx_parser( + nvonnxparser::createParser(*network, *(m_logger.get()))); + if (!onnx_parser->parseFromFile(model_file, + static_cast(nvinfer1::ILogger::Severity::kWARNING))) { + GXF_LOG_ERROR("Failed to parse ONNX file %s", model_file); + return ErrorCode::INVALID_ARGUMENT; + } + + // Provides optimization profile for dynamic size input bindings + nvinfer1::IOptimizationProfile* optimization_profile = builder->createOptimizationProfile(); + // Checks input dimensions and adds to optimization profile if needed + const int number_inputs = network->getNbInputs(); + for (int i = 0; i < number_inputs; ++i) { + auto* bind_tensor = network->getInput(i); + const char* bind_name = bind_tensor->getName(); + nvinfer1::Dims dims = bind_tensor->getDimensions(); + + // Validates binding info + if (dims.nbDims <= 0) { + GXF_LOG_ERROR("Invalid input tensor dimensions for binding %s", bind_name); + return ErrorCode::INVALID_ARGUMENT; + } + for (int j = 1; j < dims.nbDims; ++j) { + if (dims.d[j] <= 0) { + GXF_LOG_ERROR( + "Input binding %s requires dynamic size on dimension No.%d which is not supported", + bind_tensor->getName(), j); + return ErrorCode::INVALID_ARGUMENT; + } + } + if (dims.d[0] == -1) { + // Only case with first dynamic dimension is supported and assumed to be batch size. + // Always optimizes for 1-batch. + dims.d[0] = 1; + optimization_profile->setDimensions(bind_name, nvinfer1::OptProfileSelector::kMIN, dims); + optimization_profile->setDimensions(bind_name, nvinfer1::OptProfileSelector::kOPT, dims); + dims.d[0] = max_batch_size; + if (max_batch_size <= 0) { + dims.d[0] = 1; + } + optimization_profile->setDimensions(bind_name, nvinfer1::OptProfileSelector::kMAX, dims); + } + } + builderConfig->addOptimizationProfile(optimization_profile); + + // Creates TensorRT Engine Plan + std::unique_ptr engine( + builder->buildEngineWithConfig(*network, *builderConfig)); + if (!engine) { + GXF_LOG_ERROR("Failed to build TensorRT engine from model %s.", model_file); + return InferencerErrorCode::INVALID_ARGUMENT; + } + + std::unique_ptr model_stream(engine->serialize()); + if (!model_stream || model_stream->size() == 0 || model_stream->data() == nullptr) { + GXF_LOG_ERROR("Fail to serialize TensorRT Engine."); + return InferencerErrorCode::INVALID_ARGUMENT; + } + + // Prepares return value + const char* data = static_cast(model_stream->data()); + m_modelEngineStream.resize(model_stream->size()); + std::copy(data, data + model_stream->size(), m_modelEngineStream.data()); + m_modelEngineStreamSize = model_stream->size(); + return InferencerErrorCode::SUCCESS; +} + +// Writes engine plan to specified file path +std::error_code SerializeEnginePlan(const std::vector& plan, const std::string path) { + // Write Plan To Disk + std::ofstream out_stream(path.c_str(), std::ofstream::binary); + if (!out_stream.is_open()) { + GXF_LOG_ERROR("Failed to open engine file %s.", path.c_str()); + return InferencerErrorCode::TENSORRT_ENGINE_ERROR; + } + out_stream.write(plan.data(), plan.size()); + if (out_stream.bad()) { + GXF_LOG_ERROR("Failed writing engine file %s.", path.c_str()); + return 
InferencerErrorCode::TENSORRT_ENGINE_ERROR; + } + out_stream.close(); + GXF_LOG_INFO("TensorRT engine serialized at %s", path.c_str()); + return InferencerErrorCode::SUCCESS; +} + +TensorRTInferencer::TensorRTInferencer(const TensorRTInferenceParams& params) + : m_logger(new TRTLogger()) + , m_maxBatchSize(params.maxBatchSize) + , m_inputLayers(params.inputLayerNames) + , m_outputLayers(params.outputLayerNames) + , m_cudaStream(0) + , m_inferenceEngine(nullptr) { + if (params.inferType == TRTInferenceType::TRT_ENGINE) { + std::ifstream trtModelFStream(params.engineFilePath, std::ios::binary); + const bool shouldRebuild = params.force_engine_update || !trtModelFStream.good(); + const bool canRebuild = params.onnxFilePath.size() != 0; + if (canRebuild && shouldRebuild) { + // Deletes engine plan file if exists for forced update + std::remove(params.engineFilePath.c_str()); + if (std::ifstream(params.engineFilePath).good()) { + GXF_LOG_ERROR("Failed to remove engine plan file %s for forced engine update.", + params.engineFilePath.c_str()); + } + GXF_LOG_INFO( + "Rebuilding CUDA engine %s%s. " + "Note: this process may take up to several minutes.", + params.force_engine_update ? " (forced by config)" : "", + params.engineFilePath.c_str()); + auto result = convertModelToEngine(params.dlaID, params.onnxFilePath.c_str(), + params.max_workspace_size, params.buildFlags, params.maxBatchSize); + if (result != InferencerErrorCode::SUCCESS) { + GXF_LOG_INFO("Failed to create engine plan for model %s.", + params.onnxFilePath.c_str()); + } + + // Tries to serializes the plan and proceeds anyway + if (SerializeEnginePlan(m_modelEngineStream, params.engineFilePath) != + InferencerErrorCode::SUCCESS) { + GXF_LOG_INFO( + "Engine plan serialization failed. Proceeds with in-memory" + "engine plan anyway."); + } + } else { + GXF_LOG_INFO("Using CUDA engine %s. 
", params.engineFilePath.c_str()); + + trtModelFStream.seekg(0, trtModelFStream.end); + m_modelEngineStreamSize = trtModelFStream.tellg(); + m_modelEngineStream.resize(m_modelEngineStreamSize); + trtModelFStream.seekg(0, trtModelFStream.beg); + trtModelFStream.read(m_modelEngineStream.data(), m_modelEngineStreamSize); + trtModelFStream.close(); + } + + m_inferenceRuntime.reset(nvinfer1::createInferRuntime(*(m_logger.get()))); + if (params.dlaID != -1 && params.dlaID < m_inferenceRuntime->getNbDLACores()) { + m_inferenceRuntime->setDLACore(params.dlaID); + } + m_inferenceEngine = m_inferenceRuntime->deserializeCudaEngine(m_modelEngineStream.data(), + m_modelEngineStreamSize); + m_ownedInferenceEngine.reset(m_inferenceEngine); + m_inferenceContext.reset(m_inferenceEngine->createExecutionContext()); + m_inferenceContext->setOptimizationProfileAsync(0, m_cudaStream); + } else { + if (params.engine == nullptr) { + throw ErrorCode::INVALID_ARGUMENT; + } + m_inferenceEngine = params.engine; + m_inferenceContext.reset(m_inferenceEngine->createExecutionContext()); + } + + if (m_inferenceEngine == nullptr || m_inferenceContext == nullptr) { + throw ErrorCode::INVALID_ARGUMENT; + } + + m_hasImplicitBatch = m_inferenceEngine->hasImplicitBatchDimension(); + m_bindingsCount = m_inferenceEngine->getNbBindings(); + if (!m_hasImplicitBatch) { + for (size_t i = 0; i < m_bindingsCount; i++) { + if (m_inferenceEngine->bindingIsInput(i)) { + nvinfer1::Dims dims_i(m_inferenceEngine->getBindingDimensions(i)); + nvinfer1::Dims4 inputDims{1, dims_i.d[1], dims_i.d[2], dims_i.d[3]}; + m_inferenceContext->setBindingDimensions(i, inputDims); + } + } + } + std::error_code err; + err = ParseTRTModel(); + if (err != make_error_code(ErrorCode::SUCCESS)) { + throw err; + } + m_buffers.resize(m_bindingsCount); +} + +// Set input layer tensor +std::error_code TensorRTInferencer::setInput(const TensorBase& trtInputBuffer, + std::string inputLayerName) { + if (m_modelInfo.inputLayers.find(inputLayerName) == m_modelInfo.inputLayers.end()) { + return ErrorCode::INVALID_ARGUMENT; + } + LayerInfo layer = m_modelInfo.inputLayers[inputLayerName]; + m_buffers[layer.index] = trtInputBuffer.getData(); + return ErrorCode::SUCCESS; +} + +// Sets output layer tensor +std::error_code TensorRTInferencer::setOutput(TensorBase& trtOutputBuffer, + std::string outputLayerName) { + if (m_modelInfo.outputLayers.find(outputLayerName) == m_modelInfo.outputLayers.end()) { + return ErrorCode::INVALID_ARGUMENT; + } + LayerInfo layer = m_modelInfo.outputLayers[outputLayerName]; + m_buffers[layer.index] = trtOutputBuffer.getData(); + return ErrorCode::SUCCESS; +} + +// Get the model metadata parsed based on the model file +// This would be done in initialize call itself. User can access the modelMetaData +// created using this API. 
+ModelMetaData TensorRTInferencer::getModelMetaData() const { + return m_modelInfo; +} + +std::error_code TensorRTInferencer::infer(size_t batchSize) { + bool err = true; + if (!m_hasImplicitBatch) { + size_t bindingsCount = m_inferenceEngine->getNbBindings(); + for (size_t i = 0; i < bindingsCount; i++) { + if (m_inferenceEngine->bindingIsInput(i)) { + nvinfer1::Dims dims_i(m_inferenceEngine->getBindingDimensions(i)); + nvinfer1::Dims4 inputDims{static_cast(batchSize), dims_i.d[1], + dims_i.d[2], dims_i.d[3]}; + m_inferenceContext->setBindingDimensions(i, inputDims); + } + } + err = m_inferenceContext->enqueueV2(&m_buffers[0], m_cudaStream, nullptr); + } else { + err = m_inferenceContext->enqueue(m_maxBatchSize, &m_buffers[0], m_cudaStream, nullptr); + } + if (!err) { + return InferencerErrorCode::TENSORRT_INFERENCE_ERROR; + } + return ErrorCode::SUCCESS; +} + +// Applicable only for Native TRT +std::error_code TensorRTInferencer::setCudaStream(cudaStream_t cudaStream) { + m_cudaStream = cudaStream; + return ErrorCode::SUCCESS; +} + +std::error_code TensorRTInferencer::unregister(std::string layerName) { + size_t index; + if (m_modelInfo.outputLayers.find(layerName) != m_modelInfo.outputLayers.end()) { + index = m_modelInfo.outputLayers[layerName].index; + } else if (m_modelInfo.inputLayers.find(layerName) != m_modelInfo.inputLayers.end()) { + index = m_modelInfo.inputLayers[layerName].index; + } else { + return ErrorCode::INVALID_ARGUMENT; + } + m_buffers[index] = nullptr; + return ErrorCode::SUCCESS; +} + +std::error_code TensorRTInferencer::unregister() { + for (size_t i = 0; i < m_buffers.size(); i++) { + m_buffers[i] = nullptr; + } + return ErrorCode::SUCCESS; +} + +TensorRTInferencer::~TensorRTInferencer() { + m_buffers.clear(); +} + +} // namespace inferencer +} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTInferencer.h b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTInferencer.h similarity index 54% rename from isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTInferencer.h rename to isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTInferencer.h index b2a6535..21255ff 100644 --- a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTInferencer.h +++ b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTInferencer.h @@ -15,46 +15,58 @@ // // SPDX-License-Identifier: Apache-2.0 -#ifndef TENSORRT_INFERENCER_H -#define TENSORRT_INFERENCER_H +#pragma once #include +#include +#include +#include #include -#include "cv/inferencer/Errors.h" -#include "cv/inferencer/IInferenceBackend.h" -#include "cv/inferencer/Inferencer.h" +#include +#include "Errors.h" +#include "IInferenceBackend.h" +#include "Inferencer.h" -namespace cvcore { namespace inferencer { +namespace cvcore { +namespace inferencer { -class TensorRTInferencer : public IInferenceBackendClient -{ -public: - TensorRTInferencer(const TensorRTInferenceParams ¶ms); +using TensorBase = cvcore::tensor_ops::TensorBase; +class TensorRTInferencer : public IInferenceBackendClient { + public: + TensorRTInferencer(const TensorRTInferenceParams& params); // Set input layer tensor - virtual std::error_code setInput(const cvcore::TensorBase &trtInputBuffer, std::string inputLayerName) override; + std::error_code setInput(const TensorBase& trtInputBuffer, + std::string inputLayerName) override; // Sets output layer tensor - virtual std::error_code setOutput(cvcore::TensorBase &trtOutputBuffer, std::string outputLayerName) override; + 
std::error_code setOutput(TensorBase& trtOutputBuffer, + std::string outputLayerName) override; // Get the model metadata parsed based on the model file - // This would be done in initialize call itself. User can access the modelMetaData created using this API. - virtual ModelMetaData getModelMetaData() const override; + // This would be done in initialize call itself. User can access the + // modelMetaData created using this API. + ModelMetaData getModelMetaData() const override; + + // Convert onnx mode to engine file + std::error_code convertModelToEngine(int32_t dla_core, + const char* model_file, int64_t max_workspace_size, int32_t buildFlags, + std::size_t max_batch_size); // TensorRT will use infer and TensorRT would use enqueueV2 - virtual std::error_code infer(size_t batchSize = 1) override; + std::error_code infer(size_t batchSize = 1) override; // Applicable only for Native TRT - virtual std::error_code setCudaStream(cudaStream_t) override; // Only in TRT + std::error_code setCudaStream(cudaStream_t) override; // Unregister shared memory for layer - virtual std::error_code unregister(std::string layerName) override; + std::error_code unregister(std::string layerName) override; // Unregister all shared memory - virtual std::error_code unregister() override; + std::error_code unregister() override; -private: + private: ~TensorRTInferencer(); std::unique_ptr m_logger; std::unique_ptr m_inferenceRuntime; @@ -62,17 +74,19 @@ class TensorRTInferencer : public IInferenceBackendClient std::vector m_inputLayers; std::vector m_outputLayers; cudaStream_t m_cudaStream; - nvinfer1::ICudaEngine *m_inferenceEngine; + nvinfer1::ICudaEngine* m_inferenceEngine; std::unique_ptr m_ownedInferenceEngine; std::unique_ptr m_inferenceContext; size_t m_bindingsCount; ModelMetaData m_modelInfo; - std::vector m_buffers; + std::vector m_buffers; bool m_hasImplicitBatch; + std::vector m_modelEngineStream; + size_t m_modelEngineStreamSize = 0; std::error_code ParseTRTModel(); - std::error_code getLayerInfo(LayerInfo &layer, std::string layerName); + std::error_code getLayerInfo(LayerInfo& layer, std::string layerName); }; -}} // namespace cvcore::inferencer -#endif +} // namespace inferencer +} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTUtils.cpp b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTUtils.cpp new file mode 100644 index 0000000..d370049 --- /dev/null +++ b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTUtils.cpp @@ -0,0 +1,57 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +#include "gems/dnn_inferencer/inferencer/TensorRTUtils.h" +#include + +namespace cvcore { +namespace inferencer { +using TensorLayout = cvcore::tensor_ops::TensorLayout; +using ChannelType = cvcore::tensor_ops::ChannelType; +using ChannelCount = cvcore::tensor_ops::ChannelCount; +using ErrorCode = cvcore::tensor_ops::ErrorCode; + +std::error_code getCVCoreChannelTypeFromTensorRT(ChannelType& channelType, + nvinfer1::DataType dtype) { + if (dtype == nvinfer1::DataType::kINT8) { + channelType = ChannelType::U8; + } else if (dtype == nvinfer1::DataType::kHALF) { + channelType = ChannelType::F16; + } else if (dtype == nvinfer1::DataType::kFLOAT) { + channelType = ChannelType::F32; + } else { + return ErrorCode::INVALID_OPERATION; + } + + return ErrorCode::SUCCESS; +} + +std::error_code getCVCoreChannelLayoutFromTensorRT(TensorLayout& channelLayout, + nvinfer1::TensorFormat tensorFormat) { + if (tensorFormat == nvinfer1::TensorFormat::kLINEAR) { + channelLayout = TensorLayout::NCHW; + } else if (tensorFormat == nvinfer1::TensorFormat::kHWC) { + channelLayout = TensorLayout::HWC; + } else { + return ErrorCode::INVALID_OPERATION; + } + + return ErrorCode::SUCCESS; +} + +} // namespace inferencer +} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTUtils.h b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTUtils.h similarity index 69% rename from isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTUtils.h rename to isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTUtils.h index 290ea70..e4139f5 100644 --- a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/tensorrt/TensorRTUtils.h +++ b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TensorRTUtils.h @@ -14,14 +14,14 @@ // limitations under the License. // // SPDX-License-Identifier: Apache-2.0 -#ifndef TENSORRT_UTILS_H -#define TENSORRT_UTILS_H +#pragma once #include "NvInferRuntime.h" -#include "cv/core/Tensor.h" -#include "cv/inferencer/Errors.h" +#include "extensions/tensorops/core/Tensor.h" +#include "Errors.h" -namespace cvcore { namespace inferencer { +namespace cvcore { +namespace inferencer { /* * Maps tensorrt datatype to cvcore Channel type. @@ -29,7 +29,9 @@ namespace cvcore { namespace inferencer { * @param dtype tensorrt datatype * return error code */ -std::error_code getCVCoreChannelTypeFromTensorRT(cvcore::ChannelType &channelType, nvinfer1::DataType dtype); +std::error_code getCVCoreChannelTypeFromTensorRT( + cvcore::tensor_ops::ChannelType& channelType, + nvinfer1::DataType dtype); /* * Maps tensorrt datatype to cvcore Channel type. 
@@ -37,9 +39,9 @@ std::error_code getCVCoreChannelTypeFromTensorRT(cvcore::ChannelType &channelTyp * @param dtype tensorrt layout * return error code */ -std::error_code getCVCoreChannelLayoutFromTensorRT(cvcore::TensorLayout &channelLayout, - nvinfer1::TensorFormat tensorFormat); +std::error_code getCVCoreChannelLayoutFromTensorRT( + cvcore::tensor_ops::TensorLayout& channelLayout, + nvinfer1::TensorFormat tensorFormat); -}} // namespace cvcore::inferencer - -#endif +} // namespace inferencer +} // namespace cvcore diff --git a/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TritionUtils.cpp b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TritionUtils.cpp new file mode 100644 index 0000000..1c27d6d --- /dev/null +++ b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TritionUtils.cpp @@ -0,0 +1,64 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +#ifdef ENABLE_TRITON + +#include +#include +#include "gems/dnn_inferencer/inferencer/TritonUtils.h" + +namespace cvcore { +namespace inferencer { +using ChannelType = cvcore::tensor_ops::ChannelType; +bool getCVCoreChannelType(ChannelType& channelType, std::string dtype) { + if (dtype.compare("UINT8") == 0) { + channelType = ChannelType::U8; + } else if (dtype.compare("UINT16") == 0) { + channelType = ChannelType::U16; + } else if (dtype.compare("FP16") == 0) { + channelType = ChannelType::F16; + } else if (dtype.compare("FP32") == 0) { + channelType = ChannelType::F32; + } else if (dtype.compare("FP64") == 0) { + channelType = ChannelType::F64; + } else { + return false; + } + + return true; +} + +bool getTritonChannelType(std::string& dtype, ChannelType channelType) { + if (channelType == ChannelType::U8) { + dtype = "UINT8"; + } else if (channelType == ChannelType::U16) { + dtype = "UINT16"; + } else if (channelType == ChannelType::F16) { + dtype = "FP16"; + } else if (channelType == ChannelType::F32) { + dtype = "FP32"; + } else if (channelType == ChannelType::F64) { + dtype = "FP64"; + } else { + return false; + } + + return true; +} + +} // namespace inferencer +} // namespace cvcore +#endif diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/triton/TritonGrpcInferencer.cpp b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TritonGrpcInferencer.cpp similarity index 74% rename from isaac_ros_ess/gxf/ess/cvcore/src/inferencer/triton/TritonGrpcInferencer.cpp rename to isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TritonGrpcInferencer.cpp index f30e8c1..2e4b3ad 100644 --- a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/triton/TritonGrpcInferencer.cpp +++ b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TritonGrpcInferencer.cpp @@ -15,29 +15,32 @@ // // SPDX-License-Identifier: Apache-2.0 #ifdef ENABLE_TRITON -#include "TritonGrpcInferencer.h" +#include "gems/dnn_inferencer/inferencer/TritonGrpcInferencer.h" #include 
#include +#include +#include #include "TritonUtils.h" -#include "cv/inferencer/Errors.h" -#include "cv/inferencer/IInferenceBackend.h" -#include "cv/inferencer/Inferencer.h" -namespace cvcore { namespace inferencer { +#include "gems/dnn_inferencer/inferencer/Errors.h" +#include "gems/dnn_inferencer/inferencer/IInferenceBackend.h" +#include "gems/dnn_inferencer/inferencer/Inferencer.h" + +namespace cvcore { +namespace inferencer { namespace tc = triton::client; +using ChannelType = cvcore::tensor_ops::ChannelType; namespace { -size_t getDataSize(const std::vector &shape, cvcore::ChannelType dataType) -{ +size_t getDataSize(const std::vector& shape, ChannelType dataType) { size_t layerShape = 1; for (size_t k = 0; k < shape.size(); k++) layerShape *= shape[k] <= 0 ? 1 : shape[k]; return layerShape * GetChannelSize(dataType); } -} // namespace +} // namespace -std::error_code TritonGrpcInferencer::ParseGrpcModel() -{ +std::error_code TritonGrpcInferencer::ParseGrpcModel() { inference::ModelMetadataResponse tritonModelMetadata; inference::ModelConfigResponse modelConfig; @@ -48,43 +51,39 @@ std::error_code TritonGrpcInferencer::ParseGrpcModel() err = client->ModelConfig(&modelConfig, modelName, modelVersion); modelInfo.maxBatchSize = modelConfig.config().max_batch_size(); bool inputBatchDim = modelInfo.maxBatchSize > 0; - for (int i = 0; i < tritonModelMetadata.inputs().size(); i++) - { + for (int i = 0; i < tritonModelMetadata.inputs().size(); i++) { LayerInfo layer; layer.name = tritonModelMetadata.inputs(i).name(); layer.index = i; - bool parseStatus = getCVCoreChannelType(layer.dataType, tritonModelMetadata.inputs(i).datatype()); - if (!parseStatus) - { + bool parseStatus = getCVCoreChannelType(layer.dataType, + tritonModelMetadata.inputs(i).datatype()); + if (!parseStatus) { return ErrorCode::INVALID_OPERATION; } size_t cnt = modelInfo.maxBatchSize == 0 ? 0 : 1; if (modelInfo.maxBatchSize != 0) layer.shape.push_back(modelInfo.maxBatchSize); - for (; cnt < tritonModelMetadata.inputs(i).shape().size(); cnt++) - { + for (; cnt < tritonModelMetadata.inputs(i).shape().size(); cnt++) { layer.shape.push_back(tritonModelMetadata.inputs(i).shape(cnt)); } layer.layerSize = getDataSize(layer.shape, layer.dataType); modelInfo.inputLayers[layer.name] = layer; } - for (int i = 0; i < tritonModelMetadata.outputs().size(); i++) - { + for (int i = 0; i < tritonModelMetadata.outputs().size(); i++) { LayerInfo layer; layer.name = tritonModelMetadata.outputs(i).name(); layer.index = i; - bool parseStatus = getCVCoreChannelType(layer.dataType, tritonModelMetadata.inputs(i).datatype()); - if (!parseStatus) - { + bool parseStatus = getCVCoreChannelType(layer.dataType, + tritonModelMetadata.inputs(i).datatype()); + if (!parseStatus) { return ErrorCode::INVALID_OPERATION; } layer.layout = TensorLayout::NHWC; size_t cnt = modelInfo.maxBatchSize == 0 ? 
0 : 1; if (modelInfo.maxBatchSize != 0) layer.shape.push_back(modelInfo.maxBatchSize); - for (; cnt < tritonModelMetadata.outputs(i).shape().size(); cnt++) - { + for (; cnt < tritonModelMetadata.outputs(i).shape().size(); cnt++) { layer.shape.push_back(tritonModelMetadata.outputs(i).shape(cnt)); } modelInfo.outputLayers[layer.name] = layer; @@ -93,28 +92,24 @@ std::error_code TritonGrpcInferencer::ParseGrpcModel() return ErrorCode::SUCCESS; } -TritonGrpcInferencer::TritonGrpcInferencer(const TritonRemoteInferenceParams ¶ms) +TritonGrpcInferencer::TritonGrpcInferencer(const TritonRemoteInferenceParams& params) : modelVersion(params.modelVersion) - , modelName(params.modelName) -{ - - tc::Error err = tc::InferenceServerGrpcClient::Create(&client, params.serverUrl, params.verbose); + , modelName(params.modelName) { + tc::Error err = tc::InferenceServerGrpcClient::Create(&client, params.serverUrl, + params.verbose); - if (!err.IsOk()) - { + if (!err.IsOk()) { throw make_error_code(InferencerErrorCode::TRITON_SERVER_NOT_READY); } // Unregistering all shared memory regions for a clean // start. err = client->UnregisterSystemSharedMemory(); - if (!err.IsOk()) - { + if (!err.IsOk()) { throw make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); } err = client->UnregisterCudaSharedMemory(); - if (!err.IsOk()) - { + if (!err.IsOk()) { throw make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); } @@ -127,46 +122,40 @@ TritonGrpcInferencer::TritonGrpcInferencer(const TritonRemoteInferenceParams &pa outputRequests.resize(modelInfo.outputLayers.size()); outputMap.resize(modelInfo.outputLayers.size()); outputMapHistory.resize(modelInfo.outputLayers.size()); - for (auto &it : modelInfo.inputLayers) - { + for (auto &it : modelInfo.inputLayers) { tc::InferInput *inferInputVal; std::string tritonDataType; bool parseStatus = getTritonChannelType(tritonDataType, it.second.dataType); - if (!parseStatus) - { + if (!parseStatus) { throw make_error_code(InferencerErrorCode::TRITON_REGISTER_LAYER_ERROR); } - err = tc::InferInput::Create(&inferInputVal, it.second.name, it.second.shape, tritonDataType); - if (!err.IsOk()) - { + err = tc::InferInput::Create(&inferInputVal, it.second.name, it.second.shape, + tritonDataType); + if (!err.IsOk()) { throw make_error_code(InferencerErrorCode::TRITON_REGISTER_LAYER_ERROR); } inputRequests[it.second.index].reset(inferInputVal); } - for (auto &it : modelInfo.outputLayers) - { + for (auto &it : modelInfo.outputLayers) { tc::InferRequestedOutput *output; err = tc::InferRequestedOutput::Create(&output, it.second.name); - if (!err.IsOk()) - { + if (!err.IsOk()) { throw make_error_code(InferencerErrorCode::TRITON_REGISTER_LAYER_ERROR); } outputRequests[it.second.index].reset(output); } } -cudaError_t CreateCUDAIPCHandle(cudaIpcMemHandle_t *cuda_handle, void *input_d_ptr, int deviceId = 0) -{ +cudaError_t CreateCUDAIPCHandle(cudaIpcMemHandle_t* cuda_handle, void* input_d_ptr, + int deviceId = 0) { // Set the GPU device to the desired GPU cudaError_t err; err = cudaSetDevice(deviceId); - if (err != cudaSuccess) - { + if (err != cudaSuccess) { return err; } err = cudaIpcGetMemHandle(cuda_handle, input_d_ptr); - if (err != cudaSuccess) - { + if (err != cudaSuccess) { return err; } return cudaSuccess; @@ -174,43 +163,40 @@ cudaError_t CreateCUDAIPCHandle(cudaIpcMemHandle_t *cuda_handle, void *input_d_p } // Set input layer tensor -std::error_code TritonGrpcInferencer::setInput(const cvcore::TensorBase &trtInputBuffer, std::string inputLayerName) -{ - if 
(trtInputBuffer.isCPU()) - { +std::error_code TritonGrpcInferencer::setInput(const cvcore::TensorBase& trtInputBuffer, + std::string inputLayerName) { + if (trtInputBuffer.isCPU()) { return ErrorCode::INVALID_ARGUMENT; } size_t index = modelInfo.inputLayers[inputLayerName].index; - if (inputMapHistory[index] != (void *)trtInputBuffer.getData()) - { + if (inputMapHistory[index] != reinterpret_cast(trtInputBuffer.getData())) { inputMapHistory[index] = trtInputBuffer.getData(); unregister(inputLayerName); cudaIpcMemHandle_t input_cuda_handle; - cudaError_t cudaStatus = CreateCUDAIPCHandle(&input_cuda_handle, (void *)trtInputBuffer.getData()); - if (cudaStatus != cudaSuccess) - { + cudaError_t cudaStatus = CreateCUDAIPCHandle(&input_cuda_handle, + reinterpret_cast(trtInputBuffer.getData())); + if (cudaStatus != cudaSuccess) { return make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); } tc::Error err; err = client->RegisterCudaSharedMemory(inputLayerName.c_str(), input_cuda_handle, 0, trtInputBuffer.getDataSize()); - if (!err.IsOk()) - { + if (!err.IsOk()) { return make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); } size_t index = modelInfo.inputLayers[inputLayerName].index; - err = inputRequests[index]->SetSharedMemory(inputLayerName.c_str(), trtInputBuffer.getDataSize(), 0); + err = inputRequests[index]->SetSharedMemory(inputLayerName.c_str(), + trtInputBuffer.getDataSize(), 0); inputMap[index] = inputRequests[index].get(); - if (!err.IsOk()) - { + if (!err.IsOk()) { return make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); - err = inputRequests[index]->SetSharedMemory(inputLayerName.c_str(), trtInputBuffer.getDataSize(), 0); + err = inputRequests[index]->SetSharedMemory(inputLayerName.c_str(), + trtInputBuffer.getDataSize(), 0); inputMap[index] = inputRequests[index].get(); - if (!err.IsOk()) - { + if (!err.IsOk()) { return make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); } } @@ -219,32 +205,29 @@ std::error_code TritonGrpcInferencer::setInput(const cvcore::TensorBase &trtInpu } // Sets output layer tensor -std::error_code TritonGrpcInferencer::setOutput(cvcore::TensorBase &trtOutputBuffer, std::string outputLayerName) -{ - if (trtOutputBuffer.isCPU()) - { +std::error_code TritonGrpcInferencer::setOutput(cvcore::TensorBase& trtOutputBuffer, + std::string outputLayerName) { + if (trtOutputBuffer.isCPU()) { return ErrorCode::INVALID_ARGUMENT; } size_t index = modelInfo.outputLayers[outputLayerName].index; - if (outputMapHistory[index] != (void *)trtOutputBuffer.getData()) - { + if (outputMapHistory[index] != reinterpret_cast(trtOutputBuffer.getData())) { outputMapHistory[index] = trtOutputBuffer.getData(); unregister(outputLayerName); cudaIpcMemHandle_t outputCudaHandle; - CreateCUDAIPCHandle(&outputCudaHandle, (void *)trtOutputBuffer.getData()); + CreateCUDAIPCHandle(&outputCudaHandle, + reinterpret_cast(trtOutputBuffer.getData())); tc::Error err; - err = client->RegisterCudaSharedMemory(outputLayerName.c_str(), outputCudaHandle, 0 /* deviceId */, + err = client->RegisterCudaSharedMemory(outputLayerName.c_str(), outputCudaHandle, 0, trtOutputBuffer.getDataSize()); - if (!err.IsOk()) - { + if (!err.IsOk()) { return make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); } - err = outputRequests[index]->SetSharedMemory(outputLayerName.c_str(), trtOutputBuffer.getDataSize(), - 0 /* offset */); - if (!err.IsOk()) - { + err = outputRequests[index]->SetSharedMemory(outputLayerName.c_str(), + 
trtOutputBuffer.getDataSize(), 0); + if (!err.IsOk()) { return make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); } outputMap[index] = outputRequests[index].get(); @@ -253,46 +236,40 @@ std::error_code TritonGrpcInferencer::setOutput(cvcore::TensorBase &trtOutputBuf } // Get the model metadata parsed based on the model file -// This would be done in initialize call itself. User can access the modelMetaData created using this API. -ModelMetaData TritonGrpcInferencer::getModelMetaData() const -{ +// This would be done in initialize call itself. User can access the modelMetaData created +// using this API. +ModelMetaData TritonGrpcInferencer::getModelMetaData() const { return modelInfo; } // Triton will use infer and TensorRT would use enqueueV2 -std::error_code TritonGrpcInferencer::infer(size_t batchSize) -{ +std::error_code TritonGrpcInferencer::infer(size_t batchSize) { tc::InferResult *results; tc::Headers httpHeaders; tc::InferOptions options(modelInfo.modelName); options.model_version_ = modelInfo.modelVersion; - for (auto &inputLayer : modelInfo.inputLayers) - { + for (auto &inputLayer : modelInfo.inputLayers) { LayerInfo inputLayerInfo = inputLayer.second; size_t index = inputLayerInfo.index; tc::Error err; - err = - inputRequests[index]->SetSharedMemory(inputLayerInfo.name.c_str(), inputLayerInfo.layerSize * batchSize, 0); - if (!err.IsOk()) - { + err = inputRequests[index]->SetSharedMemory(inputLayerInfo.name.c_str(), + inputLayerInfo.layerSize * batchSize, 0); + if (!err.IsOk()) { return make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); } } - for (auto &outputLayer : modelInfo.outputLayers) - { + for (auto &outputLayer : modelInfo.outputLayers) { LayerInfo outputLayerInfo = outputLayer.second; size_t index = outputLayerInfo.index; tc::Error err; err = outputRequests[index]->SetSharedMemory(outputLayerInfo.name.c_str(), - outputLayerInfo.layerSize * batchSize, 0); - if (!err.IsOk()) - { + outputLayerInfo.layerSize * batchSize, 0); + if (!err.IsOk()) { return make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); } } tc::Error err = client->Infer(&results, options, inputMap, outputMap, httpHeaders); - if (!err.IsOk()) - { + if (!err.IsOk()) { return make_error_code(InferencerErrorCode::TRITON_INFERENCE_ERROR); } @@ -300,43 +277,37 @@ std::error_code TritonGrpcInferencer::infer(size_t batchSize) } // Applicable only for Native TRT -std::error_code TritonGrpcInferencer::setCudaStream(cudaStream_t) // Only in TRT -{ +std::error_code TritonGrpcInferencer::setCudaStream(cudaStream_t) { return ErrorCode::INVALID_OPERATION; } -std::error_code TritonGrpcInferencer::unregister(std::string layerName) -{ +std::error_code TritonGrpcInferencer::unregister(std::string layerName) { tc::Error err; inference::CudaSharedMemoryStatusResponse status; err = client->CudaSharedMemoryStatus(&status); - if (!err.IsOk()) - { + if (!err.IsOk()) { return make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); } err = client->UnregisterCudaSharedMemory(layerName.c_str()); - if (!err.IsOk()) - { + if (!err.IsOk()) { return make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); } return ErrorCode::SUCCESS; } -std::error_code TritonGrpcInferencer::unregister() -{ +std::error_code TritonGrpcInferencer::unregister() { tc::Error err; inference::CudaSharedMemoryStatusResponse status; err = client->CudaSharedMemoryStatus(&status); - if (!err.IsOk()) - { + if (!err.IsOk()) { return 
make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); } err = client->UnregisterCudaSharedMemory(); - if (!err.IsOk()) - { + if (!err.IsOk()) { return make_error_code(InferencerErrorCode::TRITON_CUDA_SHARED_MEMORY_ERROR); } return ErrorCode::SUCCESS; } -}} // namespace cvcore::inferencer +} // namespace inferencer +} // namespace cvcore #endif diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/triton/TritonGrpcInferencer.h b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TritonGrpcInferencer.h similarity index 60% rename from isaac_ros_ess/gxf/ess/cvcore/src/inferencer/triton/TritonGrpcInferencer.h rename to isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TritonGrpcInferencer.h index d02b3e2..2e74e99 100644 --- a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/triton/TritonGrpcInferencer.h +++ b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TritonGrpcInferencer.h @@ -14,62 +14,65 @@ // limitations under the License. // // SPDX-License-Identifier: Apache-2.0 - +#pragma once #ifdef ENABLE_TRITON -#ifndef TRITONGRPC_INFERENCER_H -#define TRITONGRPC_INFERENCER_H #include #include - +#include +#include +#include #include "cv/inferencer/Errors.h" #include "cv/inferencer/IInferenceBackend.h" #include "cv/inferencer/Inferencer.h" -namespace cvcore { namespace inferencer { +namespace cvcore { +namespace inferencer { namespace tc = triton::client; -class TritonGrpcInferencer : public IInferenceBackendClient -{ -public: - TritonGrpcInferencer(const TritonRemoteInferenceParams ¶ms); +class TritonGrpcInferencer : public IInferenceBackendClient { + public: + TritonGrpcInferencer(const TritonRemoteInferenceParams& params); // Set input layer tensor - virtual std::error_code setInput(const cvcore::TensorBase &trtInputBuffer, std::string inputLayerName) override; + std::error_code setInput(const cvcore::TensorBase& trtInputBuffer, + std::string inputLayerName) override; // Sets output layer tensor - virtual std::error_code setOutput(cvcore::TensorBase &trtOutputBuffer, std::string outputLayerName) override; + std::error_code setOutput(cvcore::TensorBase& trtOutputBuffer, + std::string outputLayerName) override; // Get the model metadata parsed based on the model file - // This would be done in initialize call itself. User can access the modelMetaData created using this API. - virtual ModelMetaData getModelMetaData() const override; + // This would be done in initialize call itself. User can access the modelMetaData + // created using this API. 
+ ModelMetaData getModelMetaData() const override; // Triton will use infer and TensorRT would use enqueueV2 - virtual std::error_code infer(size_t batchSize = 1) override; + std::error_code infer(size_t batchSize = 1) override; // Applicable only for Native TRT - virtual std::error_code setCudaStream(cudaStream_t) override; // Only in TRT + std::error_code setCudaStream(cudaStream_t) override; // Unregister shared memory for layer - virtual std::error_code unregister(std::string layerName) override; + std::error_code unregister(std::string layerName) override; // Unregister all shared memory - virtual std::error_code unregister() override; + std::error_code unregister() override; -private: + private: // Parse grpc model std::error_code ParseGrpcModel(); std::unique_ptr client; ModelMetaData modelInfo; std::vector> inputRequests; std::vector> outputRequests; - std::vector inputMap; - std::vector inputMapHistory; - std::vector outputMapHistory; - std::vector outputMap; + std::vector inputMap; + std::vector inputMapHistory; + std::vector outputMapHistory; + std::vector outputMap; std::string modelVersion, modelName; }; -}} // namespace cvcore::inferencer -#endif +} // namespace inferencer +} // namespace cvcore #endif diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/triton/TritonUtils.h b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TritonUtils.h similarity index 81% rename from isaac_ros_ess/gxf/ess/cvcore/src/inferencer/triton/TritonUtils.h rename to isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TritonUtils.h index bb13553..9a3ee8d 100644 --- a/isaac_ros_ess/gxf/ess/cvcore/src/inferencer/triton/TritonUtils.h +++ b/isaac_ros_ess/gxf/ess/gems/dnn_inferencer/inferencer/TritonUtils.h @@ -14,16 +14,17 @@ // limitations under the License. // // SPDX-License-Identifier: Apache-2.0 -#ifdef ENABLE_TRITON -#ifndef TRITON_UTILS_H -#define TRITON_UTILS_H +#pragma once +#ifdef ENABLE_TRITON #include +#include #include "cv/core/Tensor.h" #include "cv/inferencer/Errors.h" -namespace cvcore { namespace inferencer { +namespace cvcore { +namespace inferencer { /* * Maps triton datatype to cvcore Channel type. @@ -31,7 +32,7 @@ namespace cvcore { namespace inferencer { * @param dtype String representing triton datatype * return bool returns false if mapping was not successful. */ -bool getCVCoreChannelType(cvcore::ChannelType &channelType, std::string dtype); +bool getCVCoreChannelType(cvcore::ChannelType& channelType, std::string dtype); /* * Maps triton datatype to cvcore Channel type. @@ -39,9 +40,8 @@ bool getCVCoreChannelType(cvcore::ChannelType &channelType, std::string dtype); * @param channelType cvcore channel type. * return bool returns false if mapping was not successful. */ -bool getTritonChannelType(std::string &dtype, cvcore::ChannelType channelType); - -}} // namespace cvcore::inferencer -#endif // TRITON_UTILS_H +bool getTritonChannelType(std::string& dtype, cvcore::ChannelType channelType); -#endif // ENABLE_TRITON +} // namespace inferencer +} // namespace cvcore +#endif // ENABLE_TRITON diff --git a/isaac_ros_ess/gxf/ess/gems/gxf_helpers/common_expected_macro.hpp b/isaac_ros_ess/gxf/ess/gems/gxf_helpers/common_expected_macro.hpp new file mode 100644 index 0000000..2606c6e --- /dev/null +++ b/isaac_ros_ess/gxf/ess/gems/gxf_helpers/common_expected_macro.hpp @@ -0,0 +1,305 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +#pragma once + +#include +#include +#include + +#include "common/expected.hpp" + +// Concatenates its two arguments. +#define EXPECTED_MACRO_INTERNAL_CONCAT(a, b) EXPECTED_MACRO_INTERNAL_CONCAT_IMPL(a, b) +#define EXPECTED_MACRO_INTERNAL_CONCAT_IMPL(a, b) a##b + +// Converts its argument to a string at compile time. +#define EXPECTED_MACRO_INTERNAL_TO_STRING(x) EXPECTED_MACRO_INTERNAL_TO_STRING_IMPL(x) +#define EXPECTED_MACRO_INTERNAL_TO_STRING_IMPL(x) #x + +// Gets the current location in the source code in the format "file:line". +#define EXPECTED_MACRO_INTERNAL_FILE_LINE() __FILE__ ":" EXPECTED_MACRO_INTERNAL_TO_STRING(__LINE__) + +// Helper to support logging a default message and an optional custom message. +#define EXPECTED_MACRO_INTERNAL_LOG_IN_EXPECT_MACRO(expression_result, expression_string, ...) \ + ::nvidia::internal::LogHelper( \ + __FILE__, __LINE__, expression_result, expression_string, ##__VA_ARGS__); + +#define EXPECTED_MACRO_INTERNAL_CHECK_EXPRESSION_IS_EXPECTED_VOID_OR_RESULT(expression_result) \ + static_assert( \ + ::nvidia::internal::IsExpectedVoid::value || \ + std::is_enum_v, \ + EXPECTED_MACRO_INTERNAL_FILE_LINE() ": GXF_RETURN_IF_ERROR can only be used with " \ + "expressions that return Expected or enum. For expressions returning Expected " \ + "use GXF_UNWRAP_OR_RETURN instead."); + +#define EXPECTED_MACRO_INTERNAL_CHECK_EXPRESSION_IS_EXPECTED_T(expression_result) \ + static_assert(::nvidia::internal::IsExpectedT::value, \ + EXPECTED_MACRO_INTERNAL_FILE_LINE() ": GXF_UNWRAP_OR_RETURN can only be used with " \ + "expressions that return Expected. For expressions returning Expected or enum " \ + "use RETURN_IF_ERROR instead."); + +// Evaluates an expression that returns an Expected or result +// enum. If the returned type contains an error it returns the +// error. This macro can be used in functions returning both +// Expected and result enum. +// +// Per default the macro already creates an error message that +// includes the evaluated expression. If needed an optional string can +// be passed that will be appended to the default error message. It is +// also possible to use format specifiers to customize the string. +// +// It is also possible to pass the Severity used for logging as an +// additional argument. This is required if using a custom error +// message. +// +// Example: +// Expected DoSomething(); +// Expected DoAnotherThing(); +// +// Expected foo(){ +// GXF_RETURN_IF_ERROR(DoSomething()); +// GXF_RETURN_IF_ERROR(DoAnotherThing(), Severity::WARNING); +// GXF_RETURN_IF_ERROR(DoAnotherThing(), Severity::WARNING, "Custom error message."); +// } +#define RETURN_IF_ERROR(expression, ...) 
\ + do { \ + auto maybe_result = (expression); \ + EXPECTED_MACRO_INTERNAL_CHECK_EXPRESSION_IS_EXPECTED_VOID_OR_RESULT(maybe_result) \ + if (!::nvidia::internal::IsValid(maybe_result)) { \ + EXPECTED_MACRO_INTERNAL_LOG_IN_EXPECT_MACRO(maybe_result, #expression, ##__VA_ARGS__) \ + return ::nvidia::internal::ProxyFactory::FromExpectedVoidOrResult(maybe_result); \ + } \ + } while (0) + +// Evaluates an expression that returns an Expected. If the returned type +// contains an error it returns the error, else it unwraps the value contained in the Expected. +// This macro can be used in functions returning both Expected and result enum. +// +// Per default the macro already creates an error message that includes the evaluated expression. If +// needed an optional string can be passed that will be appended to the default error message. It is +// also possible to use format specifiers to customize the string. +// +// It is also possible to pass the Severity used for logging as an additional argument. +// +// Note that this macro uses expression-statements (i.e. the ({ }) surrounding the macro) which are +// a non-standard functionality. However they are present in almost all compilers. We currently only +// know of MSVC that does not support this. +// +// Example: +// Expected GetString(); +// Expected GetAnotherString(); +// +// Expected CountCombinedStringLength(){ +// const std::string str1 = GXF_UNWRAP_OR_RETURN(GetString()); +// std::string str2; +// str2 = GXF_UNWRAP_OR_RETURN(GetAnotherString(), "This should not fail. Str1 has value %s.", +// str1.c_str()); +// const std::string str3 = GXF_UNWRAP_OR_RETURN(GetAnotherString(), Severity::WARNING); +// const std::string str4 = GXF_UNWRAP_OR_RETURN(GetAnotherString(), Severity::WARNING, +// "Custom error message"); +// return str1.size() + str2.size() + str3.size() + str4.size(); +// } +#define UNWRAP_OR_RETURN(expression, ...) \ + ({ \ + auto maybe_result = (expression); \ + EXPECTED_MACRO_INTERNAL_CHECK_EXPRESSION_IS_EXPECTED_T(maybe_result) \ + if (!::nvidia::internal::IsValid(maybe_result)) { \ + EXPECTED_MACRO_INTERNAL_LOG_IN_EXPECT_MACRO(maybe_result, #expression, ##__VA_ARGS__) \ + return ::nvidia::internal::ProxyFactory::FromExpectedValue(maybe_result); \ + } \ + std::move(maybe_result.value()); \ + }) + +namespace nvidia { +// This struct has to be specialized for a given result enum. +template +struct ExpectedMacroConfig { + constexpr static Error DefaultSuccess(); + constexpr static Error DefaultError(); + static std::string Name(Error error) { return std::to_string(static_cast(error)); } +}; +} // namespace nvidia + +namespace nvidia::internal { + +constexpr Severity kDefaultSeverity = Severity::ERROR; + +template +struct IsExpectedVoid : public std::false_type {}; + +template +struct IsExpectedVoid> : public std::true_type {}; + +template +struct IsExpectedT : public std::false_type {}; + +template +struct IsExpectedT> : public std::false_type {}; + +template +struct IsExpectedT> : public std::true_type {}; + +// Returns true if the passed result is valid. +template +constexpr bool IsValid(Error result) { + return result == ExpectedMacroConfig::DefaultSuccess(); +} + +// Returns true if the passed expected is valid. 
+template +constexpr bool IsValid(const Expected& expected) { + return static_cast(expected); +} + +template +constexpr Error GetError(const Expected& expected) { + return expected.error(); +} + +template +constexpr Error GetError(const Error error) { + return error; +} + +template +class UnexpectedOrErrorProxy; + +class ProxyFactory { + public: + // Constructs the proxy from a Expected. The Expected has to be in an error state. We + // do not check this because the macro should have already done this check. We use static + // methods instead of constructors to explicitly disallow construction from certain types in + // different situations. + template + static UnexpectedOrErrorProxy FromExpectedValue(const Expected& expected) { + static_assert( + !std::is_same::value, + "This function should not be used with Expected, use " + "FromExpectedVoidOrResult() instead."); + return UnexpectedOrErrorProxy(expected); + } + + // Constructs the proxy from a Expected. The Expected has to be in an error state. + // We do not check this because the macro should have already done this check. This function + // needs to be overloaded with a function taking in a result enum. We use static methods instead + // of constructors to explicitly disallow construction from certain types in different + // situations. + template + static UnexpectedOrErrorProxy FromExpectedVoidOrResult( + const Expected expected) { + return UnexpectedOrErrorProxy(expected); + } + + // Constructs the proxy from a result enum. The result enum has to be in an error state. We do + // not check this because the macro should have already done this check. This function needs to + // be overloaded with a function taking in an Expected. We use static methods instead of + // constructors to explicitly disallow construction from certain types in different situations. + template + constexpr static UnexpectedOrErrorProxy FromExpectedVoidOrResult(Error error) { + return UnexpectedOrErrorProxy(error); + } +}; + +// A proxy class to abstract away the difference between a result enum and an Expected. +// This class defines casting operators s.t. it can implicitly cast to both result enum and +// Expected. Thus in a function that returns one of the afore mentioned types one can simply +// return this proxy and then it will implicitly cast to the appropriate return type. +template +class UnexpectedOrErrorProxy { + public: + // Casts the proxy to an error. Note that this cast is not allowed to be explicit. The + // OtherError has to be an enum (or enum class). + template >> + constexpr operator OtherError() const { + return castToError(); + } + + // Casts the proxy to an Expected. Note that this cast is not allowed to be explicit. + template + constexpr operator Expected() const { + return castToUnexpected(); + } + + private: + // Constructs the proxy from a Expected. Note that the Expected needs to contain an error. + // We do not check for this because we rely on the macro already having done that check. + template + constexpr explicit UnexpectedOrErrorProxy(const Expected& expected) + : error_(expected.error()) {} + + // Constructs the proxy from a result enum. The result enum needs to be in an error state. + // We do not check for this because we rely on the macro already having done that check. + constexpr explicit UnexpectedOrErrorProxy(Error error) : error_(error) {} + + // Casts the proxy to any error type. If the error type is not equal to the proxy's error type, + // a default error is used. 
+ template + constexpr OtherError castToError() const { + static_assert(std::is_enum_v, "Can only cast to errors of type enum."); + if constexpr (std::is_same_v) { + return error_; + } else { + return ExpectedMacroConfig::DefaultError(); + } + } + + // Casts the proxy to an unexpected using any error type. If the error type is not equal to the + // proxy's error type, a default error is used. + template + constexpr Unexpected castToUnexpected() const { + return Unexpected(castToError()); + } + + Error error_; + static_assert(std::is_enum_v, "Error has to be an enum."); + + friend ProxyFactory; +}; + +// Helper function for the logging in the above macros. This version should be used when the user +// also specifies the logging severity. The variadic arguments can be used to do string +// interpolation in the custom_text variable. +template +void LogHelper( + const char* file, int line, const ExpressionResult& expression_result, + const std::string& expression_string, Severity severity, const std::string& custom_txt = "", + Args... args) { + const auto error = GetError(expression_result); + using Error = std::remove_const_t; + const std::string text = "Expression '" + expression_string + "' failed with error '" + + ExpectedMacroConfig::Name(error) + "'. " + custom_txt; + // GCC is not able to do format security validation when the string + // is coming from a variadic template, even if the string is + // originally a char* ignore this warning until a more recent GCC + // version fixes this behavior +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wformat-security" + ::nvidia::Log(file, line, severity, text.c_str(), &args...); +#pragma GCC diagnostic pop +} + +// Overload of the LogHelper above. This version does not take the severity as an argument and used +// the default severity instead. +template +void LogHelper( + const char* file, int line, const ExpressionResult& expression_result, + const std::string& expression_string, const std::string& custom_text = "", Args... args) { + LogHelper( + file, line, expression_result, expression_string, kDefaultSeverity, custom_text, &args...); +} + +} // namespace nvidia::internal diff --git a/isaac_ros_ess/gxf/ess/gems/gxf_helpers/element_type.hpp b/isaac_ros_ess/gxf/ess/gems/gxf_helpers/element_type.hpp new file mode 100644 index 0000000..7fab97e --- /dev/null +++ b/isaac_ros_ess/gxf/ess/gems/gxf_helpers/element_type.hpp @@ -0,0 +1,55 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
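// Usage sketch (illustrative only, not from the ESS sources): how the two
// macros above compose in a codelet-style function, assuming the gxf_result_t
// specialization provided by gems/gxf_helpers/expected_macro.hpp later in this
// patch. LoadThreshold() and ApplyThreshold() are hypothetical helpers.
#include "gems/gxf_helpers/expected_macro.hpp"

nvidia::gxf::Expected<float> LoadThreshold();       // hypothetical
nvidia::gxf::Expected<void> ApplyThreshold(float);  // hypothetical

gxf_result_t ConfigureThreshold() {
  // Unwraps the value or early-returns the contained error as gxf_result_t.
  const float threshold = UNWRAP_OR_RETURN(LoadThreshold(), "loading threshold failed");
  // Propagates a failure from an Expected<void> (or result enum) expression.
  RETURN_IF_ERROR(ApplyThreshold(threshold), nvidia::Severity::WARNING);
  return GXF_SUCCESS;
}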
+// +// SPDX-License-Identifier: Apache-2.0 +#pragma once + +#include "engine/core/tensor/element_type.hpp" +#include "gxf/std/tensor.hpp" + +namespace nvidia { +namespace isaac { + +// Helps determine the Isaac element type given the GXF primitive type +::nvidia::isaac::ElementType ToIsaacElementType(gxf::PrimitiveType gxf_type) { + switch (gxf_type) { + case gxf::PrimitiveType::kInt8: + return ::nvidia::isaac::ElementType::kInt8; + case gxf::PrimitiveType::kUnsigned8: + return ::nvidia::isaac::ElementType::kUInt8; + case gxf::PrimitiveType::kInt16: + return ::nvidia::isaac::ElementType::kInt16; + case gxf::PrimitiveType::kUnsigned16: + return ::nvidia::isaac::ElementType::kUInt16; + case gxf::PrimitiveType::kInt32: + return ::nvidia::isaac::ElementType::kInt32; + case gxf::PrimitiveType::kUnsigned32: + return ::nvidia::isaac::ElementType::kUInt32; + case gxf::PrimitiveType::kInt64: + return ::nvidia::isaac::ElementType::kInt64; + case gxf::PrimitiveType::kUnsigned64: + return ::nvidia::isaac::ElementType::kUInt64; + case gxf::PrimitiveType::kFloat32: + return ::nvidia::isaac::ElementType::kFloat32; + case gxf::PrimitiveType::kFloat64: + return ::nvidia::isaac::ElementType::kFloat64; + case gxf::PrimitiveType::kCustom: + default: + return ::nvidia::isaac::ElementType::kUnknown; + } +} + +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/gems/gxf_helpers/expected_macro.hpp b/isaac_ros_ess/gxf/ess/gems/gxf_helpers/expected_macro.hpp new file mode 100644 index 0000000..43499b7 --- /dev/null +++ b/isaac_ros_ess/gxf/ess/gems/gxf_helpers/expected_macro.hpp @@ -0,0 +1,38 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +#pragma once + +#include + +#include "gems/gxf_helpers/common_expected_macro.hpp" +#include "gxf/core/expected.hpp" + +// This customizes the expected macro, s.t. it can be used with gxf_result_t. +namespace nvidia { +template <> +struct ExpectedMacroConfig { + constexpr static gxf_result_t DefaultSuccess() { return GXF_SUCCESS; } + constexpr static gxf_result_t DefaultError() { return GXF_FAILURE; } + static std::string Name(gxf_result_t result) { return GxfResultStr(result); } +}; + +// For back-compatibility we define an alias to the original name of the macro when it was gxf +// specific. +#define GXF_RETURN_IF_ERROR RETURN_IF_ERROR +#define GXF_UNWRAP_OR_RETURN UNWRAP_OR_RETURN + +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/gems/gxf_helpers/tensor.hpp b/isaac_ros_ess/gxf/ess/gems/gxf_helpers/tensor.hpp new file mode 100644 index 0000000..5ae069e --- /dev/null +++ b/isaac_ros_ess/gxf/ess/gems/gxf_helpers/tensor.hpp @@ -0,0 +1,137 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +#pragma once + +#include "engine/core/math/types.hpp" +#include "engine/core/tensor/tensor.hpp" +#include "gxf/core/expected.hpp" +#include "gxf/std/tensor.hpp" + +namespace nvidia { +namespace isaac { + +// Creates an Isaac tensor view of a GXF host tensor +template +gxf::Expected<::nvidia::isaac::CpuTensorView> ToIsaacCpuTensorView(gxf::Tensor& tensor) { + if ((tensor.storage_type() != gxf::MemoryStorageType::kHost) && + (tensor.storage_type() != gxf::MemoryStorageType::kSystem)) { + GXF_LOG_ERROR("Tensor does not have host or system storage type"); + return gxf::Unexpected{GXF_INVALID_DATA_FORMAT}; + } + if (tensor.rank() != N) { + GXF_LOG_ERROR("Tensor rank mismatch. Expected: %d, actual: %d", N, tensor.rank()); + return gxf::Unexpected{GXF_INVALID_DATA_FORMAT}; + } + ::nvidia::isaac::Vector dimensions; + for (int i = 0; i < N; i++) { + dimensions[i] = tensor.shape().dimension(i); + } + + auto maybe_ptr = tensor.data(); + if (!maybe_ptr) { + GXF_LOG_ERROR("Requested type for tensor pointer does not match tensor data type"); + return gxf::ForwardError(maybe_ptr); + } + + return ::nvidia::isaac::CreateCpuTensorViewFromData(maybe_ptr.value(), dimensions.prod(), + dimensions); +} + +// Creates an Isaac tensor const view of a GXF host tensor +template +gxf::Expected<::nvidia::isaac::CpuTensorConstView> +ToIsaacCpuTensorView(const gxf::Tensor& tensor) { + if ((tensor.storage_type() != gxf::MemoryStorageType::kHost) && + (tensor.storage_type() != gxf::MemoryStorageType::kSystem)) { + GXF_LOG_ERROR("Tensor does not have host or system storage type"); + return gxf::Unexpected{GXF_INVALID_DATA_FORMAT}; + } + if (tensor.rank() != N) { + GXF_LOG_ERROR("Tensor does not have expected rank %i", N); + return gxf::Unexpected{GXF_INVALID_DATA_FORMAT}; + } + ::nvidia::isaac::Vector dimensions; + for (int i = 0; i < N; i++) { + dimensions[i] = tensor.shape().dimension(i); + } + + auto maybe_ptr = tensor.data(); + if (!maybe_ptr) { + GXF_LOG_ERROR("Requested type for tensor pointer does not match tensor data type"); + return gxf::ForwardError(maybe_ptr); + } + + return ::nvidia::isaac::CreateCpuTensorConstViewFromData, N> ( + maybe_ptr.value(), dimensions.prod(), dimensions); +} + +// Creates an Isaac Cuda tensor view of a GXF Cuda tensor +template +gxf::Expected<::nvidia::isaac::GpuTensorView> ToIsaacGpuTensorView(gxf::Tensor& tensor) { + if (tensor.storage_type() != gxf::MemoryStorageType::kDevice) { + GXF_LOG_ERROR("Tensor does not have device storage type"); + return gxf::Unexpected{GXF_INVALID_DATA_FORMAT}; + } + if (tensor.rank() != N) { + GXF_LOG_ERROR("Tensor does not have expected rank %i", N); + return gxf::Unexpected{GXF_INVALID_DATA_FORMAT}; + } + ::nvidia::isaac::Vector dimensions; + for (int i = 0; i < N; i++) { + dimensions[i] = tensor.shape().dimension(i); + } + + auto maybe_ptr = tensor.data(); + if (!maybe_ptr) { + GXF_LOG_ERROR("Requested type for tensor pointer does not match tensor data 
type"); + return gxf::ForwardError(maybe_ptr); + } + + return ::nvidia::isaac::CreateGpuTensorViewFromData(maybe_ptr.value(), dimensions.prod(), + dimensions); +} + +// Creates an Isaac Cuda tensor const view of a GXF Cuda tensor +template +gxf::Expected<::nvidia::isaac::GpuTensorConstView> +ToIsaacGpuTensorView(const gxf::Tensor& tensor) { + if (tensor.storage_type() != gxf::MemoryStorageType::kDevice) { + GXF_LOG_ERROR("Tensor does not have device storage type"); + return gxf::Unexpected{GXF_INVALID_DATA_FORMAT}; + } + if (tensor.rank() != N) { + GXF_LOG_ERROR("Tensor does not have expected rank %i", N); + return gxf::Unexpected{GXF_INVALID_DATA_FORMAT}; + } + ::nvidia::isaac::Vector dimensions; + for (int i = 0; i < N; i++) { + dimensions[i] = tensor.shape().dimension(i); + } + + auto maybe_ptr = tensor.data(); + if (!maybe_ptr) { + GXF_LOG_ERROR("Requested type for tensor pointer does not match tensor data type"); + return gxf::ForwardError(maybe_ptr); + } + + return ::nvidia::isaac::CreateGpuTensorConstViewFromData(maybe_ptr.value(), + dimensions.prod(), + dimensions); +} + +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/gems/hash/hash_file.cpp b/isaac_ros_ess/gxf/ess/gems/hash/hash_file.cpp new file mode 100644 index 0000000..3f7aead --- /dev/null +++ b/isaac_ros_ess/gxf/ess/gems/hash/hash_file.cpp @@ -0,0 +1,71 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +#include +#include // NOLINT(build/include_order) + +#include "common/fixed_vector.hpp" +#include "common/span.hpp" +#include "gems/gxf_helpers/expected_macro.hpp" +#include "gems/hash/hash_file.hpp" + +namespace nvidia { +namespace isaac { + +namespace { + +constexpr size_t kBlockSize = 8096; + +} // namespace + +// helper to get the hash of a file +gxf::Expected hash_file(const char* path) { + std::ifstream file; + SHA256 hash; + FixedVector buffer; + + size_t bytes_remaining = std::filesystem::file_size(path); + + file.open(path, std::ios_base::in | std::ios_base::binary); + + if (!file.good()) { + return nvidia::gxf::Unexpected{GXF_FAILURE}; + } + + while (true) { + size_t current_bytes = std::min(kBlockSize, bytes_remaining); + + file.read(reinterpret_cast(buffer.data()), current_bytes); + + if (!file.good()) { + return nvidia::gxf::Unexpected{GXF_FAILURE}; + } + + GXF_RETURN_IF_ERROR(hash.hashData(Span(buffer.data(), current_bytes))); + + bytes_remaining -= current_bytes; + + if (bytes_remaining == 0) { + GXF_RETURN_IF_ERROR(hash.finalize()); + + return hash.toString(); + } + } +} + +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/IImageWarp.cpp b/isaac_ros_ess/gxf/ess/gems/hash/hash_file.hpp similarity index 68% rename from isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/IImageWarp.cpp rename to isaac_ros_ess/gxf/ess/gems/hash/hash_file.hpp index c96b07e..72cd3ca 100644 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/IImageWarp.cpp +++ b/isaac_ros_ess/gxf/ess/gems/hash/hash_file.hpp @@ -1,5 +1,5 @@ // SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,11 +14,15 @@ // limitations under the License. // // SPDX-License-Identifier: Apache-2.0 +#pragma once -#include "cv/tensor_ops/IImageWarp.h" +#include "gems/hash/sha256.hpp" -namespace cvcore { namespace tensor_ops { +namespace nvidia { +namespace isaac { -IImageWarp::~IImageWarp(){} +// helper to get the hash of a file +gxf::Expected hash_file(const char* path); -}} // namespace cvcore::tensor_ops +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/gems/hash/sha256.cpp b/isaac_ros_ess/gxf/ess/gems/hash/sha256.cpp new file mode 100644 index 0000000..d804f16 --- /dev/null +++ b/isaac_ros_ess/gxf/ess/gems/hash/sha256.cpp @@ -0,0 +1,237 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
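// Usage sketch (illustrative only, not from the ESS sources): hashing a file on
// disk with the hash_file() helper declared above; the log calls and caller are
// hypothetical.
#include "gems/hash/hash_file.hpp"

nvidia::gxf::Expected<void> LogEngineHash(const char* engine_path) {
  auto maybe_hash = nvidia::isaac::hash_file(engine_path);
  if (!maybe_hash) {
    GXF_LOG_ERROR("Failed to hash %s", engine_path);
    return nvidia::gxf::ForwardError(maybe_hash);
  }
  GXF_LOG_INFO("SHA256(%s) = %s", engine_path, maybe_hash->c_str());
  return nvidia::gxf::Success;
}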
+// +// SPDX-License-Identifier: Apache-2.0 +#include "gems/hash/sha256.hpp" + +#include + +namespace nvidia { +namespace isaac { + +namespace { + +constexpr uint32_t K[64] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, + 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, + 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, + 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, + 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, + 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, + 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, + 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; + +#define DBL_INT_ADD(a, b, c) \ + if ((a) > 0xFFFFFFFF - (c)) { \ + ++(b); \ + } \ + (a) += (c); + +#define ROTLEFT(a, b) (((a) << (b)) | ((a) >> (32 - (b)))) +#define ROTRIGHT(a, b) (((a) >> (b)) | ((a) << (32 - (b)))) + +#define CH(x, y, z) (((x) & (y)) ^ (~(x) & (z))) +#define MAJ(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z))) + +#define EP0(x) (ROTRIGHT((x), 2) ^ ROTRIGHT((x), 13) ^ ROTRIGHT((x), 22)) +#define EP1(x) (ROTRIGHT((x), 6) ^ ROTRIGHT((x), 11) ^ ROTRIGHT((x), 25)) +#define SIG0(x) (ROTRIGHT((x), 7) ^ ROTRIGHT((x), 18) ^ ((x) >> 3)) +#define SIG1(x) (ROTRIGHT((x), 17) ^ ROTRIGHT((x), 19) ^ ((x) >> 10)) + +} // namespace + +gxf::Expected SHA256::Hash(const Span data) { + SHA256 hasher; + return hasher.hashData(data) + .and_then([&]() { return hasher.finalize(); }) + .substitute(hasher.hash()); +} + +gxf::Expected SHA256::Hash(const Span data) { + auto span = Span(reinterpret_cast(data.data()), data.size()); + return Hash(span).map(ToString); +} + +void SHA256::Initialize(SHA256_CTX& ctx) { + ctx.datalen = 0; + ctx.bitlen[0] = 0; + ctx.bitlen[1] = 0; + ctx.state[0] = 0x6a09e667; + ctx.state[1] = 0xbb67ae85; + ctx.state[2] = 0x3c6ef372; + ctx.state[3] = 0xa54ff53a; + ctx.state[4] = 0x510e527f; + ctx.state[5] = 0x9b05688c; + ctx.state[6] = 0x1f83d9ab; + ctx.state[7] = 0x5be0cd19; +} + +gxf::Expected SHA256::Update(SHA256_CTX& ctx, const Span data) { + for (auto element : data) { + if (!element) { + return gxf::Unexpected{GXF_FAILURE}; + } + ctx.data[ctx.datalen] = element.value(); + ctx.datalen++; + if (ctx.datalen == 64) { + auto result = Transform(ctx, Span(ctx.data)); + if (!result) { + return gxf::ForwardError(result); + } + DBL_INT_ADD(ctx.bitlen[0], ctx.bitlen[1], 512); + ctx.datalen = 0; + } + } + return gxf::Success; +} + +gxf::Expected SHA256::Transform(SHA256_CTX& ctx, const Span data) { + uint32_t m[64]; + uint32_t i = 0; + uint32_t j = 0; + for (; i < 16; i++) { + if (!data[j] || !data[j + 1] || !data[j + 2] || !data[j + 3]) { + return gxf::Unexpected{GXF_EXCEEDING_PREALLOCATED_SIZE}; + } + m[i] = (static_cast(data[j].value()) << 24 & 0xFF000000) + | (static_cast(data[j + 1].value()) << 16 & 0x00FF0000) + | (static_cast(data[j + 2].value()) << 8 & 0x0000FF00) + | (static_cast(data[j + 3].value()) << 0 & 0x000000FF); + j += 4; + } + for (; i < 64; i++) { + m[i] = SIG1(m[i - 2]) + m[i - 7] + SIG0(m[i - 15]) + m[i - 16]; + } + + uint32_t a = ctx.state[0]; + uint32_t b = ctx.state[1]; + uint32_t c = ctx.state[2]; + uint32_t d = ctx.state[3]; + uint32_t e = ctx.state[4]; + uint32_t f = ctx.state[5]; + 
uint32_t g = ctx.state[6]; + uint32_t h = ctx.state[7]; + uint32_t t1; + uint32_t t2; + for (i = 0; i < 64; ++i) { + t1 = h + EP1(e) + CH(e, f, g) + K[i] + m[i]; + t2 = EP0(a) + MAJ(a, b, c); + h = g; + g = f; + f = e; + e = d + t1; + d = c; + c = b; + b = a; + a = t1 + t2; + } + + ctx.state[0] += a; + ctx.state[1] += b; + ctx.state[2] += c; + ctx.state[3] += d; + ctx.state[4] += e; + ctx.state[5] += f; + ctx.state[6] += g; + ctx.state[7] += h; + + return gxf::Success; +} + +gxf::Expected SHA256::Finalize(SHA256_CTX& ctx) { + uint32_t i = ctx.datalen; + + if (ctx.datalen < 56) { + ctx.data[i] = 0x80; + i++; + + while (i < 56) { + ctx.data[i] = 0x00; + i++; + } + } else { + ctx.data[i] = 0x80; + i++; + + while (i < 64) { + ctx.data[i] = 0x00; + i++; + } + + auto result = Transform(ctx, Span(ctx.data)); + if (!result) { + return gxf::ForwardError(result); + } + std::memset(ctx.data, 0, 56); + } + + DBL_INT_ADD(ctx.bitlen[0], ctx.bitlen[1], ctx.datalen * 8); + ctx.data[63] = static_cast(ctx.bitlen[0] >> 0 & 0x000000FF); + ctx.data[62] = static_cast(ctx.bitlen[0] >> 8 & 0x000000FF); + ctx.data[61] = static_cast(ctx.bitlen[0] >> 16 & 0x000000FF); + ctx.data[60] = static_cast(ctx.bitlen[0] >> 24 & 0x000000FF); + ctx.data[59] = static_cast(ctx.bitlen[1] >> 0 & 0x000000FF); + ctx.data[58] = static_cast(ctx.bitlen[1] >> 8 & 0x000000FF); + ctx.data[57] = static_cast(ctx.bitlen[1] >> 16 & 0x000000FF); + ctx.data[56] = static_cast(ctx.bitlen[1] >> 24 & 0x000000FF); + + auto result = Transform(ctx, Span(ctx.data)); + if (!result) { + return gxf::ForwardError(result); + } + + Result hash; + for (i = 0; i < 4; ++i) { + hash[i] = static_cast(ctx.state[0] >> (24 - i * 8) & 0x000000FF); + hash[i + 4] = static_cast(ctx.state[1] >> (24 - i * 8) & 0x000000FF); + hash[i + 8] = static_cast(ctx.state[2] >> (24 - i * 8) & 0x000000FF); + hash[i + 12] = static_cast(ctx.state[3] >> (24 - i * 8) & 0x000000FF); + hash[i + 16] = static_cast(ctx.state[4] >> (24 - i * 8) & 0x000000FF); + hash[i + 20] = static_cast(ctx.state[5] >> (24 - i * 8) & 0x000000FF); + hash[i + 24] = static_cast(ctx.state[6] >> (24 - i * 8) & 0x000000FF); + hash[i + 28] = static_cast(ctx.state[7] >> (24 - i * 8) & 0x000000FF); + } + + return hash; +} + +gxf::Expected SHA256::ToString(const Result& hash) { + constexpr char INT2HEX[] = { '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; + + String text; + for (auto&& value : hash) { + const uint8_t upper = static_cast(value >> 4 & 0x0F); + const uint8_t lower = static_cast(value >> 0 & 0x0F); + + auto result = text.append(INT2HEX[upper]) + .and_then([&]() { return text.append(INT2HEX[lower]); }); + if (!result) { + return gxf::Unexpected{GXF_EXCEEDING_PREALLOCATED_SIZE}; + } + } + + return text; +} + +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/gems/hash/sha256.hpp b/isaac_ros_ess/gxf/ess/gems/hash/sha256.hpp new file mode 100644 index 0000000..0a217c3 --- /dev/null +++ b/isaac_ros_ess/gxf/ess/gems/hash/sha256.hpp @@ -0,0 +1,80 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 +#pragma once + +#include + +#include "common/fixed_string.hpp" +#include "common/span.hpp" +#include "gxf/core/expected.hpp" + +namespace nvidia { +namespace isaac { + +/// The SHA256 class Generates the SHA256 sum of a given string +/// Adapted from https://www.programmingalgorithms.com/algorithm/sha256 +class SHA256 { + public: + static constexpr size_t HASH_LENGTH = 32; + using Result = std::array; + using String = FixedString; + + static gxf::Expected Hash(const Span data); + static gxf::Expected Hash(const Span data); + + SHA256() : context_{}, hash_{} { Initialize(context_); } + + /// Reset hasher to be fed again + void reset() { Initialize(context_); } + + /// Hash a given array + gxf::Expected hashData(const Span data) { return Update(context_, data); } + + /// Finalize computation of the hash, i.e. make solution available through `hash()` + gxf::Expected finalize() { + return Finalize(context_) + .map([&](Result result) { + hash_ = result; + return gxf::Success; + }); + } + + /// Return hashed result + const Result& hash() const { return hash_; } + /// Return base64 encoding of the hash + gxf::Expected toString() const { return ToString(hash_); } + + private: + struct SHA256_CTX { + uint8_t data[64]; + uint32_t datalen; + uint32_t bitlen[2]; + uint32_t state[8]; + }; + + static void Initialize(SHA256_CTX& ctx); + static gxf::Expected Update(SHA256_CTX& ctx, const Span data); + static gxf::Expected Transform(SHA256_CTX& ctx, const Span data); + static gxf::Expected Finalize(SHA256_CTX& ctx); + static gxf::Expected ToString(const Result& hash); + + SHA256_CTX context_; + Result hash_; +}; + +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/gems/video_buffer/allocator.hpp b/isaac_ros_ess/gxf/ess/gems/video_buffer/allocator.hpp new file mode 100644 index 0000000..dc910d5 --- /dev/null +++ b/isaac_ros_ess/gxf/ess/gems/video_buffer/allocator.hpp @@ -0,0 +1,276 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
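// Usage sketch (illustrative only, not from the ESS sources): incremental
// hashing with the SHA256 class above, mirroring the pattern in hash_file.cpp.
// The Span<const uint8_t> element type is an assumption based on that usage.
#include "common/span.hpp"
#include "gems/gxf_helpers/expected_macro.hpp"
#include "gems/hash/sha256.hpp"

namespace nvidia {
namespace isaac {

// Hashes two buffers in sequence and returns the hex digest.
gxf::Expected<SHA256::String> HashTwoBuffers(const Span<const uint8_t> first,
                                             const Span<const uint8_t> second) {
  SHA256 hasher;
  GXF_RETURN_IF_ERROR(hasher.hashData(first));
  GXF_RETURN_IF_ERROR(hasher.hashData(second));
  GXF_RETURN_IF_ERROR(hasher.finalize());
  return hasher.toString();
}

}  // namespace isaac
}  // namespace nvidia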
+// +// SPDX-License-Identifier: Apache-2.0 +#pragma once + +#include +#include +#include "gxf/multimedia/video.hpp" +#include "gxf/std/allocator.hpp" + +namespace nvidia { +namespace isaac { + +template +struct NoPaddingColorPlanes {}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) : planes({gxf::ColorPlane("RGB", 3, width * 3)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("BGR", 3, width * 3)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("RGBA", 4, width * 4)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("BGRA", 4, width * 4)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("RGB", 6, width * 6)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("BGR", 6, width * 6)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("RGB", 12, width * 12)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("BGR", 12, width * 12)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("gray", 1, width)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("gray", 2, width * 2)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("gray", 4, width * 4)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("gray", 4, width * 4)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("Y", 1, width), + nvidia::gxf::ColorPlane("UV", 2, width)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("Y", 1, width), + nvidia::gxf::ColorPlane("UV", 2, width)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("Y", 1, width), + nvidia::gxf::ColorPlane("UV", 2, width * 2)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("Y", 1, width), + nvidia::gxf::ColorPlane("UV", 2, width * 2)}) {} + std::array planes; +}; + +template<> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(size_t width) + : planes({gxf::ColorPlane("R", 1, width), gxf::ColorPlane("G", 1, width), + gxf::ColorPlane("B", 1, width)}) {} + std::array planes; +}; + +template<> +struct 
NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(size_t width) + : planes({gxf::ColorPlane("R", 2, width * 2), gxf::ColorPlane("G", 2, width * 2), + gxf::ColorPlane("B", 2, width * 2)}) {} + std::array planes; +}; + +template<> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(size_t width) + : planes( + {gxf::ColorPlane("R", 4, width * 4), gxf::ColorPlane("G", 4, width * 4), + gxf::ColorPlane("B", 4, width * 4)}) {} + std::array planes; +}; + +template<> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(size_t width) + : planes({gxf::ColorPlane("B", 1, width), gxf::ColorPlane("G", 1, width), + gxf::ColorPlane("R", 1, width)}) {} + std::array planes; +}; + +template<> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(size_t width) + : planes( + {gxf::ColorPlane("R", 2, width * 2), gxf::ColorPlane("G", 2, width * 2), + gxf::ColorPlane("B", 2, width * 2)}) {} + std::array planes; +}; + +template<> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(size_t width) + : planes( + {gxf::ColorPlane("B", 4, width * 4), gxf::ColorPlane("G", 4, width * 4), + gxf::ColorPlane("R", 4, width * 4)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("D", 4, width * 4)}) {} + std::array planes; +}; + +template <> +struct NoPaddingColorPlanes { + explicit NoPaddingColorPlanes(uint32_t width) + : planes({nvidia::gxf::ColorPlane("D", 8, width * 8)}) {} + std::array planes; +}; + +// This includes the list of video buffer formats that supported for the allocator +constexpr bool IsSupportedVideoFormat(const gxf::VideoFormat format) { + return format == gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_BGR || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_RGBA || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_BGRA || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB16 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_BGR16 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB32 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_BGR32 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_GRAY || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_GRAY16 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_GRAY32 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_GRAY32F || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_R8_G8_B8 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_B8_G8_R8 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_R16_G16_B16 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_B16_G16_R16 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_R32_G32_B32 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_B32_G32_R32 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_NV12 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_NV12_ER || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_NV24 || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_NV24_ER || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_D32F || + format == gxf::VideoFormat::GXF_VIDEO_FORMAT_D64F; +} + +template::type* = nullptr> +gxf::Expected AllocateUnpaddedVideoBuffer( + gxf::Handle frame, uint32_t width, uint32_t height, + gxf::MemoryStorageType storage_type, + gxf::Handle allocator) { + GXF_LOG_ERROR("Received unsupported video format!"); + return gxf::Unexpected{GXF_FAILURE}; +} + +template::type* = nullptr> +gxf::Expected AllocateUnpaddedVideoBuffer( + gxf::Handle frame, uint32_t width, uint32_t height, + gxf::MemoryStorageType storage_type, gxf::Handle 
allocator) { + if (width % 2 != 0 || height % 2 != 0) { + GXF_LOG_ERROR( + "Error: expected even width and height but received %u width and %u height", + width, height); + return gxf::Unexpected{GXF_FAILURE}; + } + NoPaddingColorPlanes nopadding_planes(width); + gxf::VideoFormatSize video_format_size; + auto size = video_format_size.size(width, height, nopadding_planes.planes); + std::vector color_planes{ + nopadding_planes.planes.begin(), nopadding_planes.planes.end()}; + gxf::VideoBufferInfo buffer_info{width, height, T, color_planes, + gxf::SurfaceLayout::GXF_SURFACE_LAYOUT_PITCH_LINEAR}; + return frame->resizeCustom(buffer_info, size, storage_type, allocator); +} + +template +gxf::Expected AllocateVideoBuffer( + gxf::Handle frame, uint32_t width, uint32_t height, + gxf::SurfaceLayout layout, gxf::MemoryStorageType storage_type, + gxf::Handle allocator) { + return frame->resize(width, height, layout, storage_type, allocator); +} + +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/thresholder/CMakeLists.txt b/isaac_ros_ess/gxf/thresholder/CMakeLists.txt new file mode 100644 index 0000000..f88e1ce --- /dev/null +++ b/isaac_ros_ess/gxf/thresholder/CMakeLists.txt @@ -0,0 +1,61 @@ +# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
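// Usage sketch (illustrative only, not from the ESS sources): allocating a
// tightly packed device RGB frame with the helper above; AllocateRgbFrame() is
// a hypothetical wrapper around handles owned by the calling codelet.
#include "gems/video_buffer/allocator.hpp"

nvidia::gxf::Expected<void> AllocateRgbFrame(
    nvidia::gxf::Handle<nvidia::gxf::VideoBuffer> frame,
    uint32_t width, uint32_t height,
    nvidia::gxf::Handle<nvidia::gxf::Allocator> allocator) {
  // Only formats accepted by IsSupportedVideoFormat() resolve to the allocating
  // overload; unsupported formats log an error and return GXF_FAILURE.
  return nvidia::isaac::AllocateUnpaddedVideoBuffer<
      nvidia::gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB>(
      frame, width, height, nvidia::gxf::MemoryStorageType::kDevice, allocator);
}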
+# +# SPDX-License-Identifier: Apache-2.0 + +project(gxf_thresholder LANGUAGES C CXX) + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-fPIC -w) +endif() +enable_language(CUDA) + +# Dependencies +find_package(Eigen3 3.3 REQUIRED NO_MODULE) +find_package(CUDAToolkit) +find_package(yaml-cpp) +find_package(GXF ${ISAAC_ROS_GXF_VERSION} MODULE REQUIRED + COMPONENTS + core + cuda + multimedia + serialization + isaac_messages +) + +set(CMAKE_INCLUDE_CURRENT_DIR TRUE) + +# Create extension +add_library(gxf_thresholder SHARED + extensions/video_buffer_utils/video_buffer_thresholder_extension.cpp + extensions/video_buffer_utils/components/video_buffer_thresholder.cpp + extensions/video_buffer_utils/components/video_buffer_thresholder.hpp + extensions/video_buffer_utils/gems/video_buffer_thresholder.cu + extensions/video_buffer_utils/gems/video_buffer_thresholder.cu.hpp +) + +target_link_libraries(gxf_thresholder + PRIVATE + Eigen3::Eigen + GXF::isaac_messages + PUBLIC + GXF::core + GXF::cuda + GXF::multimedia + CUDA::cudart + yaml-cpp +) + +target_compile_options(gxf_thresholder PUBLIC -fPIC) diff --git a/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/components/video_buffer_thresholder.cpp b/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/components/video_buffer_thresholder.cpp new file mode 100644 index 0000000..eca73d3 --- /dev/null +++ b/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/components/video_buffer_thresholder.cpp @@ -0,0 +1,237 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +#include "extensions/video_buffer_utils/components/video_buffer_thresholder.hpp" + +#include +#include +#include + +#include "extensions/messages/camera_message.hpp" +#include "extensions/video_buffer_utils/gems/video_buffer_thresholder.cu.hpp" +#include "gems/gxf_helpers/expected_macro.hpp" +#include "gems/video_buffer/allocator.hpp" + +namespace nvidia { +namespace isaac { + + +gxf::Expected AddInputTimestampToOutput(gxf::Entity& output, gxf::Entity input) { + std::string timestamp_name{"timestamp"}; + auto maybe_timestamp = input.get(timestamp_name.c_str()); + + // Default to unnamed + if (!maybe_timestamp) { + maybe_timestamp = input.get(); + } + + if (!maybe_timestamp) { + GXF_LOG_ERROR("Failed to get input timestamp!"); + return gxf::ForwardError(maybe_timestamp); + } + + auto maybe_out_timestamp = output.add(timestamp_name.c_str()); + if (!maybe_out_timestamp) { + GXF_LOG_ERROR("Failed to add timestamp to output message!"); + return gxf::ForwardError(maybe_out_timestamp); + } + + *maybe_out_timestamp.value() = *maybe_timestamp.value(); + return gxf::Success; +} + +gxf_result_t VideoBufferThresholder::registerInterface(gxf::Registrar* registrar) { + gxf::Expected result; + + result &= registrar->parameter( + image_input_, "image_input", "Input image", + "Incoming VideoBuffer image to be masked."); + result &= registrar->parameter( + mask_input_, "mask_input", "Input Mask", + "Incoming VideoBuffer mask. It must have the same dimension (with, height) as input image."); + result &= registrar->parameter( + threshold_, "threshold", "Threshold", + "Value used to threshold image pixels based on the mask value, i.e., " + "if mask[index] <= threshold: image[index] = fill_value."); + result &= registrar->parameter( + fill_value_float_, "fill_value_float", "Fill Value float", + "Value to fill the masked pixels, to be used only for float input images.", 0.0f); + result &= registrar->parameter( + fill_value_rgb_, "fill_value_rgb", "Fill Value RGB", + "Value to fill the masked pixels, to be used only for RGB input images.", Vector3i(0, 0, 0)); + result &= registrar->parameter( + masked_output_, "masked_output", "Masked output image", + "Masked output image, after threshold."); + result &= registrar->parameter( + allocator_, "allocator", "Allocator", + "Allocator to allocate output messages."); + result &= registrar->parameter( + video_buffer_name_, "video_buffer_name", "Video Buffer Name", + "Name of the VideoBuffer component to use.", + gxf::Registrar::NoDefaultParameter(), GXF_PARAMETER_FLAGS_OPTIONAL); + + return gxf::ToResultCode(result); +} + +gxf_result_t VideoBufferThresholder::start() { + return GXF_SUCCESS; +} + +gxf_result_t VideoBufferThresholder::stop() { + return GXF_SUCCESS; +} + +gxf_result_t VideoBufferThresholder::tick() { + // Read input message and validate them + gxf::Entity image_entity = GXF_UNWRAP_OR_RETURN(image_input_.get()->receive()); + gxf::Entity mask_entity = GXF_UNWRAP_OR_RETURN(mask_input_.get()->receive()); + gxf::Entity output_entity; + + gxf::Handle mask_video_buffer = + GXF_UNWRAP_OR_RETURN(mask_entity.get()); + gxf::Handle image_video_buffer, tx_video_buffer; + + if (!IsCameraMessage(image_entity).value()) { + auto video_buffer_name = video_buffer_name_.try_get(); + const char* name = video_buffer_name ? 
video_buffer_name->c_str() : nullptr; + image_video_buffer = GXF_UNWRAP_OR_RETURN(image_entity.get(name)); + const gxf::VideoBufferInfo& video_info = image_video_buffer->video_frame_info(); + output_entity = GXF_UNWRAP_OR_RETURN(gxf::Entity::New(context())); + tx_video_buffer = GXF_UNWRAP_OR_RETURN(output_entity.add(name)); + if (video_info.color_format == gxf::VideoFormat::GXF_VIDEO_FORMAT_D32F) { + GXF_RETURN_IF_ERROR(AllocateUnpaddedVideoBuffer( + tx_video_buffer, video_info.width, video_info.height, + gxf::MemoryStorageType::kDevice, allocator_)); + } else if (video_info.color_format == gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB) { + GXF_RETURN_IF_ERROR(AllocateUnpaddedVideoBuffer( + tx_video_buffer, video_info.width, video_info.height, + gxf::MemoryStorageType::kDevice, allocator_)); + } + AddInputTimestampToOutput(output_entity, image_entity); + } else { + CameraMessageParts image_message = GXF_UNWRAP_OR_RETURN(GetCameraMessage(image_entity)); + image_video_buffer = image_message.frame; + gxf::Expected camera_message = CreateCameraMessage( + context(), image_video_buffer->video_frame_info(), image_video_buffer->size(), + static_cast(image_video_buffer->storage_type()), allocator_); + if (!camera_message) { + return gxf::ToResultCode(camera_message); + } + + *camera_message->intrinsics = *image_message.intrinsics; + *camera_message->extrinsics = *image_message.extrinsics; + *camera_message->sequence_number = *image_message.sequence_number; + *camera_message->timestamp = *image_message.timestamp; + tx_video_buffer = camera_message->frame; + output_entity = camera_message->entity; + } + + // Validate video buffer and perform thresholding + GXF_RETURN_IF_ERROR(validateImageMessage(image_video_buffer)); + GXF_RETURN_IF_ERROR(validateMaskMessage(image_video_buffer, mask_video_buffer)); + GXF_RETURN_IF_ERROR(thresholdImage(image_video_buffer, mask_video_buffer, tx_video_buffer)); + GXF_RETURN_IF_ERROR(gxf::ToResultCode(masked_output_->publish(output_entity))); + + return GXF_SUCCESS; +} + +gxf_result_t VideoBufferThresholder::thresholdImage( + const gxf::Handle image_video_buffer, + const gxf::Handle mask_video_buffer, + gxf::Handle output_video_buffer) const { + const gxf::VideoBufferInfo& video_info = image_video_buffer->video_frame_info(); + uint32_t buffer_width = video_info.width; + uint32_t buffer_height = video_info.height; + + // Perform thresholding + if (video_info.color_format == gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB) { + threshold_image_cuda( + reinterpret_cast(image_video_buffer->pointer()), + reinterpret_cast(mask_video_buffer->pointer()), + reinterpret_cast(output_video_buffer->pointer()), + {static_cast(buffer_width), static_cast(buffer_height)}, + threshold_.get(), + toUchar3(fill_value_rgb_.get())); + } else if (video_info.color_format == gxf::VideoFormat::GXF_VIDEO_FORMAT_D32F) { + threshold_image_cuda( + reinterpret_cast(image_video_buffer->pointer()), + reinterpret_cast(mask_video_buffer->pointer()), + reinterpret_cast(output_video_buffer->pointer()), + {static_cast(buffer_width), static_cast(buffer_height)}, + threshold_.get(), + fill_value_float_.get()); + } else { + return GXF_INVALID_DATA_FORMAT; + } + + return GXF_SUCCESS; +} + +uchar3 VideoBufferThresholder::toUchar3(const Vector3i& vector3i) const { + return { + static_cast(vector3i.x()), + static_cast(vector3i.y()), + static_cast(vector3i.z())}; +} + +gxf_result_t VideoBufferThresholder::validateImageMessage( + const gxf::Handle image) const { + if (image->storage_type() != gxf::MemoryStorageType::kDevice) { + 
GXF_LOG_ERROR("Input image must be stored in " + "gxf::MemoryStorageType::kDevice"); + return GXF_INVALID_DATA_FORMAT; + } + + const gxf::VideoBufferInfo image_info = image->video_frame_info(); + if (image_info.color_format != gxf::VideoFormat::GXF_VIDEO_FORMAT_RGB && + image_info.color_format != gxf::VideoFormat::GXF_VIDEO_FORMAT_D32F) { + GXF_LOG_ERROR("Input image must be of type " + "gxf::VideoFormat::GXF_VIDEO_FORMAT_D32F or" + "gxf::VideoFormat::GXF_VIDEO_FORMAT_RGBA"); + return GXF_INVALID_DATA_FORMAT; + } + + return GXF_SUCCESS; +} + +gxf_result_t VideoBufferThresholder::validateMaskMessage( + const gxf::Handle image, + const gxf::Handle mask) const { + if (image->storage_type() != mask->storage_type()) { + GXF_LOG_ERROR("Input image image must be stored in the same type as input depth"); + return GXF_INVALID_DATA_FORMAT; + } + + const gxf::VideoBufferInfo mask_info = mask->video_frame_info(); + if (mask_info.color_format != gxf::VideoFormat::GXF_VIDEO_FORMAT_GRAY32F && + mask_info.color_format != gxf::VideoFormat::GXF_VIDEO_FORMAT_D32F) { + GXF_LOG_ERROR("Input mask image must be of type " + "gxf::VideoFormat::GXF_VIDEO_FORMAT_GRAY32F or" + "gxf::VideoFormat::GXF_VIDEO_FORMAT_D32F"); + return GXF_INVALID_DATA_FORMAT; + } + + const gxf::VideoBufferInfo image_info = image->video_frame_info(); + if (mask_info.width != image_info.width || mask_info.height != image_info.height) { + GXF_LOG_ERROR("Input image dimensions must match input mask dimensions"); + return GXF_INVALID_DATA_FORMAT; + } + return GXF_SUCCESS; +} + +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/components/video_buffer_thresholder.hpp b/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/components/video_buffer_thresholder.hpp new file mode 100644 index 0000000..efa8860 --- /dev/null +++ b/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/components/video_buffer_thresholder.hpp @@ -0,0 +1,71 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 +#pragma once + +#include +#include + +#include "engine/core/math/types.hpp" +#include "extensions/gxf_helpers/parameter_parser_isaac.hpp" +#include "extensions/gxf_helpers/parameter_wrapper_isaac.hpp" +#include "gxf/std/allocator.hpp" +#include "gxf/std/codelet.hpp" +#include "gxf/std/parameter_parser_std.hpp" +#include "gxf/std/receiver.hpp" +#include "gxf/std/transmitter.hpp" + +#include "gxf/multimedia/video.hpp" + +namespace nvidia { +namespace isaac { + +// Image threshold for VideoBuffers +// +// This codelet consumes a VideoBuffer image (float or RGB), a VideoBuffer mask (float), a threshold +// (float) for the mask, and a fill value (float or RGB) to "paint" the pixels in the image that +// are under or equal to the threshold in the mask image: +// if mask[index] <= threshold: image[index] = fill_value +class VideoBufferThresholder : public gxf::Codelet { + public: + gxf_result_t registerInterface(gxf::Registrar* registrar) override; + gxf_result_t start() override; + gxf_result_t stop() override; + gxf_result_t tick() override; + + private: + gxf_result_t thresholdImage( + const gxf::Handle image_video_buffer, + const gxf::Handle mask_video_buffer, + gxf::Handle output_video_buffer) const; + gxf_result_t validateImageMessage(const gxf::Handle image) const; + gxf_result_t validateMaskMessage(const gxf::Handle image, + const gxf::Handle mask) const; + uchar3 toUchar3(const Vector3i& vector3i) const; + + private: + gxf::Parameter> image_input_; + gxf::Parameter> mask_input_; + gxf::Parameter threshold_; + gxf::Parameter fill_value_float_; + gxf::Parameter fill_value_rgb_; + gxf::Parameter> masked_output_; + gxf::Parameter> allocator_; + gxf::Parameter video_buffer_name_; +}; + +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/gems/video_buffer_thresholder.cu b/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/gems/video_buffer_thresholder.cu new file mode 100644 index 0000000..f9957d0 --- /dev/null +++ b/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/gems/video_buffer_thresholder.cu @@ -0,0 +1,101 @@ +/* +Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. + +NVIDIA CORPORATION and its licensors retain all intellectual property +and proprietary rights in and to this software, related documentation +and any modifications thereto. Any use, reproduction, disclosure or +distribution of this software and related documentation without an express +license agreement from NVIDIA CORPORATION is strictly prohibited. +*/ + +#include "video_buffer_thresholder.cu.hpp" + +#include +#include + +#include "cuda.h" +#include "engine/gems/cuda_utils/launch_utils.hpp" + +namespace nvidia { +namespace isaac { +namespace { + +__constant__ uint32_t NUM_CHANNELS = 3; + +__device__ float get_pixel(const float * input) +{ + return *input; +} + +__device__ void set_pixel(const float pixel, float* output) +{ + *output = pixel; +} + +__global__ void threshold_image_cuda_kernel( + const float * image, const float * mask, float * output, + float threshold, float fill_value, int2 image_size) +{ + const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; + const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; + if (x < image_size.x && y < image_size.y) + { + const uint32_t index_mask = (y * image_size.x + x); + const uint32_t index_image = index_mask; + float pixel_out = (mask[index_mask] > threshold)? 
get_pixel(image + index_image) : fill_value; + set_pixel(pixel_out, output + index_image); + } +} + +__device__ uchar3 get_pixel_uchar3(const unsigned char * input) +{ + return make_uchar3(input[0], input[1], input[2]); +} + +__device__ void set_pixel_uchar3(const uchar3 pixel, unsigned char * output) +{ + output[0] = pixel.x; + output[1] = pixel.y; + output[2] = pixel.z; +} + +__global__ void threshold_image_cuda_kernel( + const unsigned char * image, const float * mask, unsigned char * output, + float threshold, uchar3 fill_value, int2 image_size) +{ + const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; + const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; + if (x < image_size.x && y < image_size.y) + { + const uint32_t index_mask = (y * image_size.x + x); + const uint32_t index_image = index_mask * NUM_CHANNELS; + uchar3 pixel_out = (mask[index_mask] > threshold) ? + get_pixel_uchar3(image + index_image) : fill_value; + set_pixel_uchar3(pixel_out, output + index_image); + } +} + +} // namespace + +void threshold_image_cuda(const float* image, const float* mask, + float* output, const int2 image_size, const float threshold, + const float fill_value) { + cudaMemset(output, 0, image_size.x * image_size.y * sizeof(float)); + dim3 block(16, 16); + dim3 grid(DivRoundUp(image_size.x, 16), DivRoundUp(image_size.y, 16), 1); + threshold_image_cuda_kernel<<<grid, block>>>(image, mask, output, threshold, fill_value, + image_size); +} + +void threshold_image_cuda(const unsigned char * image, const float * mask, + unsigned char * output, const int2 image_size, const float threshold, + const uchar3 fill_value) { + cudaMemset(output, 0, image_size.x * image_size.y * sizeof(uint8_t) * NUM_CHANNELS); + dim3 block(16, 16); + dim3 grid(DivRoundUp(image_size.x, 16), DivRoundUp(image_size.y, 16), 1); + threshold_image_cuda_kernel<<<grid, block>>>(image, mask, output, threshold, fill_value, + image_size); +} + +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/NppUtils.h b/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/gems/video_buffer_thresholder.cu.hpp similarity index 50% rename from isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/NppUtils.h rename to isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/gems/video_buffer_thresholder.cu.hpp index 398ef8f..af13688 100644 --- a/isaac_ros_ess/gxf/ess/cvcore/src/tensor_ops/NppUtils.h +++ b/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/gems/video_buffer_thresholder.cu.hpp @@ -1,5 +1,5 @@ // SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -// Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
@@ -15,17 +15,19 @@ // // SPDX-License-Identifier: Apache-2.0 -#ifndef CVCORE_NPP_UTILS_H -#define CVCORE_NPP_UTILS_H +#pragma once +#include "engine/core/math/types.hpp" +#include "cuda_runtime.h" -#include +namespace nvidia { +namespace isaac { -#include +void threshold_image_cuda(const float* image, const float* mask, + float* output, const int2 image_size, const float threshold, + const float fill_value); -namespace cvcore { namespace tensor_ops { - -NppStreamContext GetNppStreamContext(cudaStream_t stream); - -}} // namespace cvcore::tensor_ops - -#endif // CVCORE_NPP_UTILS_H +void threshold_image_cuda(const unsigned char * image, const float * mask, + unsigned char * output, const int2 image_size, const float threshold, + const uchar3 fill_value); +} // namespace isaac +} // namespace nvidia diff --git a/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/video_buffer_thresholder_extension.cpp b/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/video_buffer_thresholder_extension.cpp new file mode 100644 index 0000000..f1406e0 --- /dev/null +++ b/isaac_ros_ess/gxf/thresholder/extensions/video_buffer_utils/video_buffer_thresholder_extension.cpp @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +// Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +#include "extensions/video_buffer_utils/components/video_buffer_thresholder.hpp" +#include "gxf/std/extension_factory_helper.hpp" + +GXF_EXT_FACTORY_BEGIN() + +// This is an extension made out of isaac::video_buffer_utils +GXF_EXT_FACTORY_SET_INFO( + 0xaf5b39a0485f4f28, 0x83eb359d36049b68, "ThresholderExtension", + "Extension containing a video buffer thresholder codelet", "NVIDIA", "0.0.1", + "LICENSE"); + +GXF_EXT_FACTORY_ADD( + 0xc757549c430d11ee, 0xbb0073c7d954a493, + nvidia::isaac::VideoBufferThresholder, nvidia::gxf::Codelet, + "Thresholds an input VideoBuffer given a mask and a threshold for the mask"); + +GXF_EXT_FACTORY_END() diff --git a/isaac_ros_ess/include/isaac_ros_ess/ess_disparity_node.hpp b/isaac_ros_ess/include/isaac_ros_ess/ess_disparity_node.hpp index f11f87e..23c71a6 100644 --- a/isaac_ros_ess/include/isaac_ros_ess/ess_disparity_node.hpp +++ b/isaac_ros_ess/include/isaac_ros_ess/ess_disparity_node.hpp @@ -31,7 +31,7 @@ namespace nvidia { namespace isaac_ros { -namespace dnn_stereo_disparity +namespace dnn_stereo_depth { class ESSDisparityNode : public nitros::NitrosNode @@ -53,12 +53,14 @@ class ESSDisparityNode : public nitros::NitrosNode const int input_layer_width_; const int input_layer_height_; const std::string model_input_type_; + const std::string onnx_file_path_; const std::string engine_file_path_; const std::vector input_layers_name_; const std::vector output_layers_name_; + const float threshold_; }; -} // namespace dnn_stereo_disparity +} // namespace dnn_stereo_depth } // namespace isaac_ros } // namespace nvidia diff --git a/isaac_ros_ess/isaac_ros_ess/__init__.py b/isaac_ros_ess/isaac_ros_ess/__init__.py index e69de29..6bbcb13 100644 --- a/isaac_ros_ess/isaac_ros_ess/__init__.py +++ b/isaac_ros_ess/isaac_ros_ess/__init__.py @@ -0,0 +1,16 @@ +# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +# Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# +# SPDX-License-Identifier: Apache-2.0 diff --git a/isaac_ros_ess/launch/isaac_ros_argus_ess.launch.py b/isaac_ros_ess/launch/isaac_ros_argus_ess.launch.py index 5656258..c08ddc1 100644 --- a/isaac_ros_ess/launch/isaac_ros_argus_ess.launch.py +++ b/isaac_ros_ess/launch/isaac_ros_argus_ess.launch.py @@ -28,8 +28,14 @@ def generate_launch_description(): 'engine_file_path', default_value='', description='The absolute path to the ESS engine plan.'), + DeclareLaunchArgument( + 'threshold', + default_value='0.9', + description='Threshold value ranges between 0.0 and 1.0 ' + 'for filtering disparity with confidence.'), ] engine_file_path = LaunchConfiguration('engine_file_path') + threshold = LaunchConfiguration('threshold') argus_stereo_node = ComposableNode( name='argus_stereo', @@ -41,17 +47,49 @@ def generate_launch_description(): }], ) + left_resize_node = ComposableNode( + name='left_resize_node', + package='isaac_ros_image_proc', + plugin='nvidia::isaac_ros::image_proc::ResizeNode', + parameters=[{ + 'output_width': 960, + 'output_height': 576, + 'keep_aspect_ratio': True + }], + remappings=[ + ('camera_info', 'left/camerainfo'), + ('image', 'left/image_raw'), + ('resize/camera_info', 'left/camerainfo_resize'), + ('resize/image', 'left/image_resize')] + ) + + right_resize_node = ComposableNode( + name='right_resize_node', + package='isaac_ros_image_proc', + plugin='nvidia::isaac_ros::image_proc::ResizeNode', + parameters=[{ + 'output_width': 960, + 'output_height': 576, + 'keep_aspect_ratio': True + }], + remappings=[ + ('camera_info', 'right/camerainfo'), + ('image', 'right/image_raw'), + ('resize/camera_info', 'right/camerainfo_resize'), + ('resize/image', 'right/image_resize')] + ) + left_rectify_node = ComposableNode( name='left_rectify_node', package='isaac_ros_image_proc', plugin='nvidia::isaac_ros::image_proc::RectifyNode', parameters=[{ - 'output_width': 1920, - 'output_height': 1200, + 'output_width': 960, + 'output_height': 576, }], remappings=[ - ('image_raw', 'left/image_raw'), - ('camera_info', 'left/camerainfo'), + ('image_raw', 'left/image_resize'), + ('camera_info', 'left/camerainfo_resize'), ('image_rect', 'left/image_rect'), ('camera_info_rect', 'left/camera_info_rect') ] @@ -62,12 +100,12 @@ def generate_launch_description(): package='isaac_ros_image_proc', plugin='nvidia::isaac_ros::image_proc::RectifyNode', parameters=[{ - 'output_width': 1920, - 'output_height': 1200, + 'output_width': 960, + 'output_height': 576, }], remappings=[ - ('image_raw', 'right/image_raw'), - ('camera_info', 'right/camerainfo'), + ('image_raw', 'right/image_resize'), + ('camera_info', 'right/camerainfo_resize'), ('image_rect', 'right/image_rect'), ('camera_info_rect', 'right/camera_info_rect') ] @@ -76,8 +114,9 @@ def generate_launch_description(): disparity_node = ComposableNode( name='disparity', package='isaac_ros_ess', - plugin='nvidia::isaac_ros::dnn_stereo_disparity::ESSDisparityNode', - parameters=[{'engine_file_path': engine_file_path}], + plugin='nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode', + parameters=[{'engine_file_path': engine_file_path, + 'threshold': threshold}], remappings=[ ('left/camera_info', 'left/camera_info_rect'), ('right/camera_info', 'right/camera_info_rect') @@ -106,7 +145,8 @@ def generate_launch_description(): package='rclcpp_components', executable='component_container_mt', composable_node_descriptions=[ - argus_stereo_node, left_rectify_node, right_rectify_node, + argus_stereo_node, left_resize_node, right_resize_node, + left_rectify_node, 
right_rectify_node, disparity_node, point_cloud_node ], output='screen' diff --git a/isaac_ros_ess/launch/isaac_ros_ess.launch.py b/isaac_ros_ess/launch/isaac_ros_ess.launch.py index 08fc216..0916d83 100644 --- a/isaac_ros_ess/launch/isaac_ros_ess.launch.py +++ b/isaac_ros_ess/launch/isaac_ros_ess.launch.py @@ -28,14 +28,22 @@ def generate_launch_description(): 'engine_file_path', default_value='', description='The absolute path to the ESS engine plan.'), + DeclareLaunchArgument( + 'threshold', + default_value='0.9', + description='Threshold value ranges between 0.0 and 1.0 ' + 'for filtering disparity with confidence.'), ] engine_file_path = LaunchConfiguration('engine_file_path') + threshold = LaunchConfiguration('threshold') disparity_node = ComposableNode( name='disparity', package='isaac_ros_ess', - plugin='nvidia::isaac_ros::dnn_stereo_disparity::ESSDisparityNode', - parameters=[{'engine_file_path': engine_file_path}]) + plugin='nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode', + parameters=[{'engine_file_path': engine_file_path, + 'threshold': threshold}] + ) container = ComposableNodeContainer( name='disparity_container', diff --git a/isaac_ros_ess/launch/isaac_ros_ess_isaac_sim.launch.py b/isaac_ros_ess/launch/isaac_ros_ess_isaac_sim.launch.py index 19dc879..e8417ea 100644 --- a/isaac_ros_ess/launch/isaac_ros_ess_isaac_sim.launch.py +++ b/isaac_ros_ess/launch/isaac_ros_ess_isaac_sim.launch.py @@ -31,18 +31,57 @@ def generate_launch_description(): 'engine_file_path', default_value='', description='The absolute path to the ESS engine plan.'), + DeclareLaunchArgument( + 'threshold', + default_value='0.0', + description='Threshold value ranges between 0.0 and 1.0 ' + 'for filtering disparity with confidence.'), ] engine_file_path = LaunchConfiguration('engine_file_path') + threshold = LaunchConfiguration('threshold') + + image_resize_node_right = ComposableNode( + package='isaac_ros_image_proc', + plugin='nvidia::isaac_ros::image_proc::ResizeNode', + name='image_resize_node_right', + parameters=[{ + 'output_width': 960, + 'output_height': 576, + 'encoding_desired': 'rgb8', + }], + remappings=[ + ('camera_info', 'front_stereo_camera/right_rgb/camerainfo'), + ('image', 'front_stereo_camera/right_rgb/image_raw'), + ('resize/camera_info', 'front_stereo_camera/right_rgb/camerainfo_resize'), + ('resize/image', 'front_stereo_camera/right_rgb/image_resize')] + ) + + image_resize_node_left = ComposableNode( + package='isaac_ros_image_proc', + plugin='nvidia::isaac_ros::image_proc::ResizeNode', + name='image_resize_node_left', + parameters=[{ + 'output_width': 960, + 'output_height': 576, + 'encoding_desired': 'rgb8', + }], + remappings=[ + ('camera_info', 'front_stereo_camera/left_rgb/camerainfo'), + ('image', 'front_stereo_camera/left_rgb/image_raw'), + ('resize/camera_info', 'front_stereo_camera/left_rgb/camerainfo_resize'), + ('resize/image', 'front_stereo_camera/left_rgb/image_resize')] + ) disparity_node = ComposableNode( name='disparity', package='isaac_ros_ess', - plugin='nvidia::isaac_ros::dnn_stereo_disparity::ESSDisparityNode', - parameters=[{'engine_file_path': engine_file_path}], - remappings=[('left/image_rect', 'rgb_left'), - ('left/camera_info', 'camera_info_left'), - ('right/image_rect', 'rgb_right'), - ('right/camera_info', 'camera_info_right')]) + plugin='nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode', + parameters=[{'engine_file_path': engine_file_path, + 'threshold': threshold}], + remappings=[('left/image_rect', 'front_stereo_camera/left_rgb/image_resize'), + 
('left/camera_info', 'front_stereo_camera/left_rgb/camerainfo_resize'), + ('right/image_rect', 'front_stereo_camera/right_rgb/image_resize'), + ('right/camera_info', 'front_stereo_camera/right_rgb/camerainfo_resize')]) pointcloud_node = ComposableNode( package='isaac_ros_stereo_image_proc', @@ -51,16 +90,18 @@ def generate_launch_description(): 'use_color': True, 'unit_scaling': 1.0 }], - remappings=[('left/image_rect_color', 'rgb_left'), - ('left/camera_info', 'camera_info_left'), - ('right/camera_info', 'camera_info_right')]) + remappings=[('left/image_rect_color', 'front_stereo_camera/left_rgb/image_resize'), + ('left/camera_info', 'front_stereo_camera/left_rgb/camerainfo_resize'), + ('right/camera_info', 'front_stereo_camera/right_rgb/camerainfo_resize')]) container = ComposableNodeContainer( name='disparity_container', namespace='disparity', package='rclcpp_components', executable='component_container_mt', - composable_node_descriptions=[disparity_node, pointcloud_node], + composable_node_descriptions=[disparity_node, pointcloud_node, + image_resize_node_left, image_resize_node_right + ], output='screen' ) diff --git a/isaac_ros_ess/launch/isaac_ros_ess_realsense.launch.py b/isaac_ros_ess/launch/isaac_ros_ess_realsense.launch.py index 27c1390..1b7bf82 100644 --- a/isaac_ros_ess/launch/isaac_ros_ess_realsense.launch.py +++ b/isaac_ros_ess/launch/isaac_ros_ess_realsense.launch.py @@ -21,6 +21,7 @@ import launch from launch.actions import DeclareLaunchArgument from launch.substitutions import LaunchConfiguration + from launch_ros.actions import ComposableNodeContainer, Node from launch_ros.descriptions import ComposableNode @@ -31,19 +32,26 @@ def generate_launch_description(): 'engine_file_path', default_value='', description='The absolute path to the ESS engine plan.'), + DeclareLaunchArgument( + 'threshold', + default_value='0.9', + description='Threshold value ranges between 0.0 and 1.0 ' + 'for filtering disparity with confidence.'), ] engine_file_path = LaunchConfiguration('engine_file_path') + threshold = LaunchConfiguration('threshold') disparity_node = ComposableNode( name='disparity', package='isaac_ros_ess', - plugin='nvidia::isaac_ros::dnn_stereo_disparity::ESSDisparityNode', - parameters=[{'engine_file_path': engine_file_path}], + plugin='nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode', + parameters=[{'engine_file_path': engine_file_path, + 'threshold': threshold}], remappings=[ - ('left/camera_info', 'infra1/camera_info'), - ('left/image_rect', 'infra1/image_rect_raw'), - ('right/camera_info', 'infra2/camera_info'), - ('right/image_rect', 'infra2/image_rect_raw') + ('left/camera_info', 'infra1/camera_info_resize'), + ('left/image_rect', 'infra1/image_rect_raw_resize'), + ('right/camera_info', 'infra2/camera_info_resize'), + ('right/image_rect', 'infra2/image_rect_raw_resize') ] ) @@ -71,16 +79,49 @@ def generate_launch_description(): ('image', 'infra2/image_rect_raw')] ) + image_resize_node_left = ComposableNode( + package='isaac_ros_image_proc', + plugin='nvidia::isaac_ros::image_proc::ResizeNode', + name='image_resize_left', + parameters=[{ + 'output_width': 960, + 'output_height': 576, + 'keep_aspect_ratio': True + }], + remappings=[ + ('camera_info', 'infra1/camera_info'), + ('image', 'infra1/image_rect_raw'), + ('resize/camera_info', 'infra1/camera_info_resize'), + ('resize/image', 'infra1/image_rect_raw_resize')] + ) + + image_resize_node_right = ComposableNode( + package='isaac_ros_image_proc', + plugin='nvidia::isaac_ros::image_proc::ResizeNode', + 
name='image_resize_right', + parameters=[{ + 'output_width': 960, + 'output_height': 576, + 'keep_aspect_ratio': True + }], + remappings=[ + ('camera_info', 'infra2/camera_info'), + ('image', 'infra2/image_rect_raw'), + ('resize/camera_info', 'infra2/camera_info_resize'), + ('resize/image', 'infra2/image_rect_raw_resize')] + ) + pointcloud_node = ComposableNode( + name='pointcloud_node', package='isaac_ros_stereo_image_proc', plugin='nvidia::isaac_ros::stereo_image_proc::PointCloudNode', parameters=[{ 'use_color': True, 'unit_scaling': 1.0 }], - remappings=[('left/image_rect_color', 'infra1/image_rect_raw'), - ('left/camera_info', 'infra1/camera_info'), - ('right/camera_info', 'infra2/camera_info')] + remappings=[('left/image_rect_color', 'infra1/image_rect_raw_resize'), + ('left/camera_info', 'infra1/camera_info_resize'), + ('right/camera_info', 'infra2/camera_info_resize')] ) # RealSense @@ -107,9 +148,12 @@ def generate_launch_description(): composable_node_descriptions=[disparity_node, image_format_converter_node_left, image_format_converter_node_right, + image_resize_node_left, + image_resize_node_right, pointcloud_node, realsense_node], - output='screen' + output='screen', + arguments=['--ros-args', '--log-level', 'info'] ) rviz_config_path = os.path.join(get_package_share_directory( diff --git a/isaac_ros_ess/launch/isaac_ros_ess_zed.launch.py b/isaac_ros_ess/launch/isaac_ros_ess_zed.launch.py new file mode 100644 index 0000000..3522c18 --- /dev/null +++ b/isaac_ros_ess/launch/isaac_ros_ess_zed.launch.py @@ -0,0 +1,205 @@ +# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +import os + +from ament_index_python.packages import get_package_share_directory + +import launch +from launch.actions import DeclareLaunchArgument +from launch.substitutions import Command, LaunchConfiguration +from launch_ros.actions import ComposableNodeContainer, Node +from launch_ros.descriptions import ComposableNode + + +def generate_launch_description(): + launch_args = [ + DeclareLaunchArgument( + 'engine_file_path', + default_value='', + description='The absolute path to the ESS engine plan.'), + DeclareLaunchArgument( + 'threshold', + default_value='0.9', + description='Threshold value ranges between 0.0 and 1.0 ' + 'for filtering disparity with confidence.'), + ] + engine_file_path = LaunchConfiguration('engine_file_path') + threshold = LaunchConfiguration('threshold') + + disparity_node = ComposableNode( + name='disparity', + package='isaac_ros_ess', + plugin='nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode', + parameters=[{'engine_file_path': engine_file_path, + 'threshold': threshold}], + remappings=[ + ('left/camera_info', 'zed_node/left/camera_info_resize'), + ('left/image_rect', 'zed_node/left/image_rect_color_rgb_resize'), + ('right/camera_info', 'zed_node/right/camera_info_resize'), + ('right/image_rect', 'zed_node/right/image_rect_color_rgb_resize') + ] + ) + + pointcloud_node = ComposableNode( + package='isaac_ros_stereo_image_proc', + plugin='nvidia::isaac_ros::stereo_image_proc::PointCloudNode', + parameters=[{ + 'use_color': True, + 'unit_scaling': 1.0 + }], + remappings=[('left/image_rect_color', 'zed_node/left/image_rect_color_rgb_resize'), + ('left/camera_info', 'zed_node/left/camera_info_resize'), + ('right/camera_info', 'zed_node/right/camera_info_resize')] + ) + + image_format_converter_node_left = ComposableNode( + package='isaac_ros_image_proc', + plugin='nvidia::isaac_ros::image_proc::ImageFormatConverterNode', + name='image_format_node_left', + parameters=[{ + 'encoding_desired': 'rgb8', + }], + remappings=[ + ('image_raw', 'zed_node/left/image_rect_color'), + ('image', 'zed_node/left/image_rect_color_rgb')] + ) + + image_format_converter_node_right = ComposableNode( + package='isaac_ros_image_proc', + plugin='nvidia::isaac_ros::image_proc::ImageFormatConverterNode', + name='image_format_node_right', + parameters=[{ + 'encoding_desired': 'rgb8', + }], + remappings=[ + ('image_raw', 'zed_node/right/image_rect_color'), + ('image', 'zed_node/right/image_rect_color_rgb')] + ) + + image_resize_node_left = ComposableNode( + package='isaac_ros_image_proc', + plugin='nvidia::isaac_ros::image_proc::ResizeNode', + name='image_resize_left', + parameters=[{ + 'output_width': 960, + 'output_height': 576, + 'keep_aspect_ratio': True + }], + remappings=[ + ('camera_info', 'zed_node/left/camera_info'), + ('image', 'zed_node/left/image_rect_color_rgb'), + ('resize/camera_info', 'zed_node/left/camera_info_resize'), + ('resize/image', 'zed_node/left/image_rect_color_rgb_resize')] + ) + + image_resize_node_right = ComposableNode( + package='isaac_ros_image_proc', + plugin='nvidia::isaac_ros::image_proc::ResizeNode', + name='image_resize_right', + parameters=[{ + 'output_width': 960, + 'output_height': 576, + 'keep_aspect_ratio': True + }], + remappings=[ + ('camera_info', 'zed_node/right/camera_info'), + ('image', 'zed_node/right/image_rect_color_rgb'), + ('resize/camera_info', 'zed_node/right/camera_info_resize'), + ('resize/image', 'zed_node/right/image_rect_color_rgb_resize')] + ) + + container = ComposableNodeContainer( + 
name='disparity_container', + namespace='disparity', + package='rclcpp_components', + executable='component_container_mt', + composable_node_descriptions=[image_format_converter_node_left, + image_format_converter_node_right, + image_resize_node_left, + image_resize_node_right, + disparity_node, + pointcloud_node], + output='screen' + ) + + rviz_config_path = os.path.join(get_package_share_directory( + 'isaac_ros_ess'), 'config', 'isaac_ros_ess_zed.rviz') + + rviz = Node( + package='rviz2', + executable='rviz2', + arguments=['-d', rviz_config_path], + output='screen') + + # Define LaunchDescription variable + ld = launch.LaunchDescription(launch_args + [container, rviz]) + + # https://www.stereolabs.com/docs/ros2/ros2-composition/ + # Camera model (force value) + camera_model = 'zed2i' + + # URDF/xacro file to be loaded by the Robot State Publisher node + xacro_path = os.path.join( + get_package_share_directory('zed_wrapper'), + 'urdf', 'zed_descr.urdf.xacro' + ) + + # ZED Configurations to be loaded by ZED Node + config_common = os.path.join( + get_package_share_directory('isaac_ros_ess'), + 'config', + 'zed.yaml' + ) + + config_camera = os.path.join( + get_package_share_directory('zed_wrapper'), + 'config', + camera_model + '.yaml' + ) + + # Robot State Publisher node + rsp_node = Node( + package='robot_state_publisher', + executable='robot_state_publisher', + name='zed_state_publisher', + output='screen', + parameters=[{ + 'robot_description': Command( + [ + 'xacro', ' ', xacro_path, ' ', + 'camera_name:=', camera_model, ' ', + 'camera_model:=', camera_model + ]) + }] + ) + + # ZED node using manual composition + zed_node = Node( + package='zed_wrapper', + executable='zed_wrapper', + output='screen', + parameters=[ + config_common, # Common parameters + config_camera, # Camera related parameters + ] + ) + + # Add nodes to LaunchDescription + ld.add_action(rsp_node) + ld.add_action(zed_node) + + return ld diff --git a/isaac_ros_ess/package.xml b/isaac_ros_ess/package.xml index 9a4687c..413fde0 100644 --- a/isaac_ros_ess/package.xml +++ b/isaac_ros_ess/package.xml @@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0 isaac_ros_ess - 0.31.0 + 2.0.0 DNN Stereo Disparity Network for Isaac ROS Xutong Ren @@ -37,6 +37,7 @@ SPDX-License-Identifier: Apache-2.0 isaac_ros_nitros_camera_info_type isaac_ros_nitros_disparity_image_type isaac_ros_nitros_image_type + isaac_ros_image_proc isaac_ros_stereo_image_proc isaac_ros_common diff --git a/isaac_ros_ess/scripts/isaac_ros_ess_visualizer.py b/isaac_ros_ess/scripts/isaac_ros_ess_visualizer.py index 988defb..cff9f80 100755 --- a/isaac_ros_ess/scripts/isaac_ros_ess_visualizer.py +++ b/isaac_ros_ess/scripts/isaac_ros_ess_visualizer.py @@ -43,19 +43,19 @@ def get_args(): parser.add_argument('--enable_rosbag', action='store_true', help='Save output or display it', default=False) parser.add_argument('--rosbag_path', - default='/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity/' + default='/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_depth/' 'resources/rosbags/ess_rosbag', help='Absolute path to your rosbag.') parser.add_argument('--left_image_path', - default='/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity/' + default='/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_depth/' 'resources/examples/left.png', help='Absolute path your left image.') parser.add_argument('--right_image_path', - default='/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity/' + 
default='/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_depth/' 'resources/examples/right.png', help='Absolute path your right image.') parser.add_argument('--camera_info_path', - default='/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_disparity/' + default='/workspaces/isaac_ros-dev/src/isaac_ros_dnn_stereo_depth/' 'resources/examples/camera.json', help='Absolute path your camera info json file.') args = parser.parse_args() diff --git a/isaac_ros_ess/src/ess_disparity_node.cpp b/isaac_ros_ess/src/ess_disparity_node.cpp index 3acf9ea..ad0b69c 100644 --- a/isaac_ros_ess/src/ess_disparity_node.cpp +++ b/isaac_ros_ess/src/ess_disparity_node.cpp @@ -33,7 +33,7 @@ namespace nvidia { namespace isaac_ros { -namespace dnn_stereo_disparity +namespace dnn_stereo_depth { using nvidia::gxf::optimizer::GraphIOGroupSupportedDataTypesInfoList; @@ -65,7 +65,10 @@ const std::vector> EXTENSIONS = { {"isaac_ros_gxf", "gxf/lib/cuda/libgxf_cuda.so"}, {"isaac_ros_gxf", "gxf/lib/serialization/libgxf_serialization.so"}, {"isaac_ros_gxf", "gxf/lib/libgxf_synchronization.so"}, - {"isaac_ros_stereo_image_proc", "gxf/lib/sgm_disparity/libgxf_disparity_extension.so"}, + {"isaac_ros_ess", "gxf/lib/thresholder/libgxf_thresholder.so"}, + {"isaac_ros_gxf", "gxf/lib/libgxf_isaac_messages.so"}, + {"isaac_ros_image_proc", "gxf/lib/image_proc/libgxf_tensorops.so"}, + {"isaac_ros_stereo_image_proc", "gxf/lib/sgm_disparity/libgxf_sgm.so"}, {"isaac_ros_ess", "gxf/lib/ess/libgxf_cvcore_ess.so"} }; const std::vector PRESET_EXTENSION_SPEC_NAMES = { @@ -133,11 +136,13 @@ ESSDisparityNode::ESSDisparityNode(const rclcpp::NodeOptions & options) input_layer_width_(declare_parameter("input_layer_width", 960)), input_layer_height_(declare_parameter("input_layer_height", 576)), model_input_type_(declare_parameter("model_input_type", "RGB_U8")), + onnx_file_path_(declare_parameter("onnx_file_path", "")), engine_file_path_(declare_parameter("engine_file_path", "")), input_layers_name_(declare_parameter>( "input_layers_name", {"input_left", "input_right"})), output_layers_name_(declare_parameter>( - "output_layers_name", {"output_left"})) + "output_layers_name", {"output_left", "output_conf"})), + threshold_(declare_parameter("threshold", 0.9)) { RCLCPP_DEBUG(get_logger(), "[ESSDisparityNode] Initializing ESSDisparityNode."); @@ -169,17 +174,21 @@ void ESSDisparityNode::postLoadGraphCallback() { // Forward ESSDisparityNode parameters getNitrosContext().setParameterInt32( - "ess", "nvidia::cvcore::ESS", "input_layer_width", input_layer_width_); + "ess", "nvidia::isaac::ESSInference", "input_layer_width", input_layer_width_); getNitrosContext().setParameterInt32( - "ess", "nvidia::cvcore::ESS", "input_layer_height", input_layer_height_); + "ess", "nvidia::isaac::ESSInference", "input_layer_height", input_layer_height_); getNitrosContext().setParameterStr( - "ess", "nvidia::cvcore::ESS", "model_input_type", model_input_type_); + "ess", "nvidia::isaac::ESSInference", "model_input_type", model_input_type_); getNitrosContext().setParameterStr( - "ess", "nvidia::cvcore::ESS", "engine_file_path", engine_file_path_); + "ess", "nvidia::isaac::ESSInference", "onnx_file_path", onnx_file_path_); + getNitrosContext().setParameterStr( + "ess", "nvidia::isaac::ESSInference", "engine_file_path", engine_file_path_); getNitrosContext().setParameter1DStrVector( - "ess", "nvidia::cvcore::ESS", "input_layers_name", input_layers_name_); + "ess", "nvidia::isaac::ESSInference", "input_layers_name", input_layers_name_); 
getNitrosContext().setParameter1DStrVector( - "ess", "nvidia::cvcore::ESS", "output_layers_name", output_layers_name_); + "ess", "nvidia::isaac::ESSInference", "output_layers_name", output_layers_name_); + getNitrosContext().setParameterFloat32( + "disparity_thresholder", "nvidia::isaac::VideoBufferThresholder", "threshold", threshold_); RCLCPP_INFO( get_logger(), "[ESSDisparityNode] Setting engine_file_path: %s.", engine_file_path_.c_str()); @@ -187,9 +196,9 @@ void ESSDisparityNode::postLoadGraphCallback() ESSDisparityNode::~ESSDisparityNode() {} -} // namespace dnn_stereo_disparity +} // namespace dnn_stereo_depth } // namespace isaac_ros } // namespace nvidia // Register as a component -RCLCPP_COMPONENTS_REGISTER_NODE(nvidia::isaac_ros::dnn_stereo_disparity::ESSDisparityNode) +RCLCPP_COMPONENTS_REGISTER_NODE(nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode) diff --git a/isaac_ros_ess/test/dummy_model.onnx b/isaac_ros_ess/test/dummy_model.onnx index a3e6d8c..2bebd90 100644 --- a/isaac_ros_ess/test/dummy_model.onnx +++ b/isaac_ros_ess/test/dummy_model.onnx @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c8bfb3a36567863f53a67010cef0b5d6c31d159515e926d3dfe7b1f8497b2bbe -size 426 +oid sha256:26f1a49f94f8e9a4cab60222f2b64e7584209db500a6b0be3583fa6d2d7232ad +size 450 diff --git a/isaac_ros_ess/test/dummy_model_480x288.onnx b/isaac_ros_ess/test/dummy_model_480x288.onnx new file mode 100644 index 0000000..4d068e8 --- /dev/null +++ b/isaac_ros_ess/test/dummy_model_480x288.onnx @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f53b12654501beb93467a24ed1fea83057bc5cf43248f77ccc14ae5b21c0a15 +size 450 diff --git a/isaac_ros_ess/test/isaac_ros_ess_test.py b/isaac_ros_ess/test/isaac_ros_ess_test.py index adfd239..286e8fa 100644 --- a/isaac_ros_ess/test/isaac_ros_ess_test.py +++ b/isaac_ros_ess/test/isaac_ros_ess_test.py @@ -57,7 +57,7 @@ def generate_test_description(): disparity_node = ComposableNode( name='disparity', package='isaac_ros_ess', - plugin='nvidia::isaac_ros::dnn_stereo_disparity::ESSDisparityNode', + plugin='nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode', namespace=IsaacROSDisparityTest.generate_namespace(), parameters=[{'engine_file_path': engine_file_path}], ) @@ -77,9 +77,13 @@ def generate_test_description(): class IsaacROSDisparityTest(IsaacROSBaseTest): IMAGE_HEIGHT = 1080 IMAGE_WIDTH = 1920 + # disparity output dimension fixed at 960x576 + ESS_OUTPUT_HEIGHT = 576 + ESS_OUTPUT_WIDTH = 960 TIMEOUT = 10 ENGINE_FILE_PATH = '/tmp/dummy_model.engine' - CAMERA_INFO_PATH = os.path.dirname(os.path.realpath(__file__)) + '/camera_info.json' + CAMERA_INFO_PATH = os.path.dirname( + os.path.realpath(__file__)) + '/camera_info.json' def _create_image(self): image = Image() @@ -123,7 +127,8 @@ def test_image_disparity(self): try: left_image = self._create_image() right_image = self._create_image() - camera_info = JSONConversion.load_camera_info_from_json(self.CAMERA_INFO_PATH) + camera_info = JSONConversion.load_camera_info_from_json( + self.CAMERA_INFO_PATH) end_time = time.time() + self.TIMEOUT done = False @@ -142,12 +147,15 @@ def test_image_disparity(self): self.assertTrue(done, 'Didnt recieve output on disparity topic') disparity = received_messages['disparity'] + self.assertEqual(disparity.image.height, self.ESS_OUTPUT_HEIGHT) + self.assertEqual(disparity.image.width, self.ESS_OUTPUT_WIDTH) + scaling_x = disparity.image.width / self.IMAGE_WIDTH + scaling_y = disparity.image.height / self.IMAGE_HEIGHT + min_scaling = 
min(scaling_x, scaling_y) self.assertEqual(disparity.image.encoding, '32FC1') - self.assertEqual(disparity.image.height, self.IMAGE_HEIGHT) - self.assertEqual(disparity.image.width, self.IMAGE_WIDTH) - self.assertEqual(disparity.image.step, self.IMAGE_WIDTH * 4) - self.assertAlmostEqual(disparity.f, -0.3678634) - self.assertAlmostEqual(disparity.t, 434.9440002) + self.assertEqual(disparity.image.step, disparity.image.width * 4) + self.assertAlmostEqual(disparity.f, 434.9440002*min_scaling) + self.assertAlmostEqual(disparity.t, -0.3678634) self.assertAlmostEqual(disparity.min_disparity, 0.0) self.assertAlmostEqual(disparity.max_disparity, 2147483648.0) diff --git a/isaac_ros_ess/test/isaac_ros_ess_test_1_16HD_model.py b/isaac_ros_ess/test/isaac_ros_ess_test_1_16HD_model.py new file mode 100644 index 0000000..d635f20 --- /dev/null +++ b/isaac_ros_ess/test/isaac_ros_ess_test_1_16HD_model.py @@ -0,0 +1,167 @@ +# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +import os +import subprocess +import time + +from isaac_ros_test import IsaacROSBaseTest, JSONConversion + +from launch_ros.actions import ComposableNodeContainer +from launch_ros.descriptions import ComposableNode + +import pytest +import rclpy + +from sensor_msgs.msg import CameraInfo, Image +from stereo_msgs.msg import DisparityImage + + +@pytest.mark.rostest +def generate_test_description(): + dir_path = os.path.dirname(os.path.realpath(__file__)) + engine_file_path = '/tmp/dummy_model_480x288.engine' + if not os.path.isfile(engine_file_path): + args = [ + '/usr/src/tensorrt/bin/trtexec', + f'--saveEngine={engine_file_path}', + f'--onnx={dir_path}/dummy_model_480x288.onnx' + ] + print('Generating model engine file by command: ', ' '.join(args)) + result = subprocess.run( + args, + env=os.environ, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + if result.returncode != 0: + raise Exception( + f'Failed to convert with status: {result.returncode}.\n' + f'stderr:\n' + result.stderr.decode('utf-8') + ) + + disparity_node = ComposableNode( + name='disparity', + package='isaac_ros_ess', + plugin='nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode', + namespace=IsaacROSDisparityTest.generate_namespace(), + parameters=[{'engine_file_path': engine_file_path}], + ) + + container = ComposableNodeContainer( + name='disparity_container', + namespace='', + package='rclcpp_components', + executable='component_container', + composable_node_descriptions=[disparity_node], + output='screen', + arguments=['--ros-args', '--log-level', 'info'] + ) + return IsaacROSDisparityTest.generate_test_description([container]) + + +class IsaacROSDisparityTest(IsaacROSBaseTest): + IMAGE_HEIGHT = 1080 + IMAGE_WIDTH = 1920 + # disparity output dimension fixed at 960x576 + ESS_OUTPUT_HEIGHT = 288 + ESS_OUTPUT_WIDTH = 480 + TIMEOUT = 10 + ENGINE_FILE_PATH = 
'/tmp/dummy_model_480x288.engine' + CAMERA_INFO_PATH = os.path.dirname( + os.path.realpath(__file__)) + '/camera_info.json' + + def _create_image(self): + image = Image() + image.height = self.IMAGE_HEIGHT + image.width = self.IMAGE_WIDTH + image.encoding = 'rgb8' + image.is_bigendian = False + image.step = self.IMAGE_WIDTH * 3 + image.data = [0] * self.IMAGE_HEIGHT * self.IMAGE_WIDTH * 3 + return image + + def test_image_disparity(self): + end_time = time.time() + self.TIMEOUT + while time.time() < end_time: + if os.path.isfile(self.ENGINE_FILE_PATH): + break + self.assertTrue(os.path.isfile(self.ENGINE_FILE_PATH), + 'Model engine file was not generated in time.') + + received_messages = {} + self.generate_namespace_lookup(['left/image_rect', 'right/image_rect', + 'left/camera_info', 'right/camera_info', + 'disparity']) + + subs = self.create_logging_subscribers( + [('disparity', DisparityImage)], received_messages) + + image_left_pub = self.node.create_publisher( + Image, self.namespaces['left/image_rect'], self.DEFAULT_QOS + ) + image_right_pub = self.node.create_publisher( + Image, self.namespaces['right/image_rect'], self.DEFAULT_QOS + ) + camera_info_left = self.node.create_publisher( + CameraInfo, self.namespaces['left/camera_info'], self.DEFAULT_QOS + ) + camera_info_right = self.node.create_publisher( + CameraInfo, self.namespaces['right/camera_info'], self.DEFAULT_QOS + ) + + try: + left_image = self._create_image() + right_image = self._create_image() + camera_info = JSONConversion.load_camera_info_from_json( + self.CAMERA_INFO_PATH) + + end_time = time.time() + self.TIMEOUT + done = False + + while time.time() < end_time: + image_left_pub.publish(left_image) + image_right_pub.publish(right_image) + camera_info_left.publish(camera_info) + camera_info_right.publish(camera_info) + + rclpy.spin_once(self.node, timeout_sec=0.1) + + if 'disparity' in received_messages: + done = True + break + self.assertTrue(done, 'Didnt recieve output on disparity topic') + + disparity = received_messages['disparity'] + self.assertEqual(disparity.image.height, self.ESS_OUTPUT_HEIGHT) + self.assertEqual(disparity.image.width, self.ESS_OUTPUT_WIDTH) + scaling_x = disparity.image.width / self.IMAGE_WIDTH + scaling_y = disparity.image.height / self.IMAGE_HEIGHT + min_scaling = min(scaling_x, scaling_y) + self.assertEqual(disparity.image.encoding, '32FC1') + self.assertEqual(disparity.image.step, disparity.image.width * 4) + self.assertAlmostEqual(disparity.f, 434.9440002*min_scaling) + self.assertAlmostEqual(disparity.t, -0.3678634) + self.assertAlmostEqual(disparity.min_disparity, 0.0) + self.assertAlmostEqual(disparity.max_disparity, 2147483648.0) + + finally: + [self.node.destroy_subscription(sub) for sub in subs] + self.node.destroy_publisher(image_left_pub) + self.node.destroy_publisher(image_right_pub) + self.node.destroy_publisher(camera_info_right) + self.node.destroy_publisher(camera_info_left) diff --git a/resources/Isaac_sim_enable_stereo.png b/resources/Isaac_sim_enable_stereo.png deleted file mode 100644 index 69c2126..0000000 --- a/resources/Isaac_sim_enable_stereo.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8c9ba1412d085b83a42833d12765e6ea331000047bc48849359858c5e9167beb -size 109035 diff --git a/resources/Isaac_sim_play.png b/resources/Isaac_sim_play.png deleted file mode 100644 index 7208c9e..0000000 --- a/resources/Isaac_sim_play.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:e5c0d786681c57ab859790262f19157b22a18a1f4f0bd254fa7f65df12fbe516 -size 784360 diff --git a/resources/Isaac_sim_set_stereo_offset.png b/resources/Isaac_sim_set_stereo_offset.png deleted file mode 100644 index f6ce4c2..0000000 --- a/resources/Isaac_sim_set_stereo_offset.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:bb562c0a01984e1115ae37811b749b14e2bb1a4429a5e3e2043157ef52d9aa36 -size 112275 diff --git a/resources/Rviz.png b/resources/Rviz.png deleted file mode 100644 index 01d8bab..0000000 --- a/resources/Rviz.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:06500adcdb5805645dba7967591168c6b1c961e77131722e5f57d93c7bc9fd7c -size 871120 diff --git a/resources/Visualizer_isaac_sim.png b/resources/Visualizer_isaac_sim.png deleted file mode 100644 index 7e3c2cc..0000000 --- a/resources/Visualizer_isaac_sim.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a86ff0874f5a8950764718a4e732d6f34b6b37489e4c8b77e5081e114bc53971 -size 242879 diff --git a/resources/isaac_ros_ess_nodegraph.png b/resources/isaac_ros_ess_nodegraph.png deleted file mode 100644 index 7052df3..0000000 --- a/resources/isaac_ros_ess_nodegraph.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ed1f1826b874f70f1d7591f92eb0cc3c208e953db30e78e3a7d789e801eafb56 -size 34903 diff --git a/resources/output_raw.jpeg b/resources/output_raw.jpeg deleted file mode 100644 index 8350cfec81695e77a374ad9141f2fc3b9627dc2f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 87772 zcmb5Wc_5Tu^gsU0j1i#>*|UTw*#?vBL$+kEG^8wJ%k-|0P?n*R$ueZgPK7AjkSvuY z%m^WbN=ieB%3jufcSgNG@6Y%5{pWki^W1x%d+$7R&$;KG`#R^&@1ftHAZ|0`BgPO6 z20<|J5BmKcIt(G;aK?W?EZ{$q4T)r7L2lo+jg^gaJ0~Z{b`B1%9X#AzJ5W0~IJo(_ zQM|kO`1m+?3JCJ=66D#%w~NsU3<27(AlZ>f_FY^YT)Y0C>31W9Vq-b9jfjAuAUFz! 
zO{11wknpOIS+t#$0SNz#zHa#fYJ1K2Sqhmkrbc8`22N%+a7qJ+f@!5Dr>AS9HLq(+ z8XppNkR3!P#I#b47!byLDV3U<&N;aOm5y_uO;_Y25Cwi3j$i~QFcb|agVTq=F{Ew( zAS7~$3@?OBOhEkQsTcI@`UUx?-ePyg7X(Lcn!CpXak`{VDrB7<^r!lduLeck2wKrD zR*9mTWcc`e4d4xY)_wGin3B@AhUIl8({l)Wua;g9BXpE?u7oa)(U&L6qylHVY6;+H zA<|pUxr1|6tpssO%4aQ=iKk-sKh&L?@>m9Z7pE;`&aJ2~DVgE>{lz}4hPkhY9iY?N zEp^6Uu5$W(<-%t$*6YN4;6%d0Ff0yUhpmkYa&JhL-vY%Pa|8 zCYb#Q_ywP&oTjt(>&QHkopRJfjoo#OI580u*A&xrDh>&}xb$RK{3$l`mJKha@n#;V zYz^~CjnkKnXKp?j28?+F_3&;gQH@q*45Ex4Lq4mPPh z-~qhfqcnFvIJz&wX~6}SJ}+lbne`F|gtkkXb8Pt+LCGI_I?NV9L}&uYiX|Q#rd7!@ ziDyQhCGkk#l_8pHg{n*8c>|GMEUcH+26*qPi>?Mu75faA$?hol1ooRY zw~vz#^tyjIg97PJ>`0Z0z4B z=uYzo>aSPIpbGrf1L4Kb9m#bXPk;R!U)<0Q38K$`{7SYcePDZOH7s!R<*$alqvXgg zOlGZ~696YCL|;Gib+?mfD#5TYeujh3`-FEtQ&UF`3E$3l$d~)dlYbwm;1_dePcZW1D_TlMk;5kXB{*tP&-_IXjJ(8HVfe|KsXS;Gt^YxbZWV ziBO^JTL>YHC0S=gs4Q(ymdG+frYA{RvJFXOkVo0FmQdLyDU}d1iV#96+gP$?mu&s7 zGoI)7zVHA3e9Fw3Gs`*WKG%IM-|zQ293EyY&F6SsPEDm#gK|Ex8x{gIGY+6};7F(< z2(y%tMHnha0?^cP1901BIt?u)Xi<4?*e+}ivvEzP5Nv6z8Ek!9oKWOrI5JPm|zH=a%1cZ?(#bY zGS3vOXZIFNwdhgdc0yA`(R>rCFJZHUWX=r?`p_o<8SX^z=*Wh zj~~$n(S>iusr)Ak!A>ThZrsF0b9Nu9s@KdxiZ# z-8!=tG28nD?wi@hRw4uyIEwKp)W>HG>AIlnyoF0b&L@os|8C-UY$tGCp_gkD0aFj; zw?a-bg01d0ye!Qs!Kb;TdLL;XC0cj*hGNh+s4HmyUtti!#Z(s@b$E)&neh!x)twSq zXkPL^ZY2O01Y8aSvNYjLz3d(I@DPw=AV6jg!S?gG*wyW17OO^W3XJmlx9tLL>$u`l z-{3%WgP^c^n9=nrivT69i#o*2mAjYYG(WlZZi)Mtx&7%<)JDUlku}Y#OzmH91U6`K z4tGa?6?M~GHAXw{t2c5mDR05$WC9*&)f`#s*c`sBQ*1H7Pz7g#6=qp$#0^&>fehyi2+mAW$Mcya zCE&^C;MMBjpj-?&bU2+ErQ~gnS7^>jXpDwZD>>pM|4lKWyeVJ>s5NvdsV!`UI}$1r zl7n2_-}s+#X0^hnXIW=_6+9fnk!o~7PL9jOPmo2EsV-$w;5q?zMUzDmSx9pz)z`_@1Pa~qQmbB4vZrqBrROE5HV61m&O%Q`id zl%@e&Z4K&03!IbZQ+6R+>|~Ei3028lf6~05_0y@qZ|z&KIBBxIVmIyHOvvq)^dNr2 zHL-oG!G`aD6_t$4Pq;UzcB)c-Y7^_IMcS_jv}f}FyZ(c4{eP`I)o35rui8GoQM_!w z5^UA^2YG=X1A|W|7ff!5E`z)Et&%gFF8{XTAF=f8Y|PC{tDDxs>C-H#CBj3FfPp(MXEwufswmb%yS$;%P?kx1VWYcC^zTH1_e99&1Uvn& zvmt?j+ZSCzYCAT>&(PK#KI7S|4y@gK@(1w=Y59W;CoTNMe{PZcCgnFw3AyZCK_NbU z>Mk$U ztz+?O$6UWN2Z(+ZY*yI$WC`?T7#){7w{oGlp*9xxZ5S#UE}xWRYt1Q-0^@MTXEFA| z$ZH(AVGel!C72RnkeXyMz9F+;aK^}8PV$t;-L0UBu?aMA7i^V4n!T2kRQ9T6K_!dF zil0aJNLIlI4?6D$b@+4uEL0;wrYdLx0fi;mIUYit7DSl#qwWfA>S|07h34k_7AEom zmMpe|wn4(@x6j)tj@i$>Pb=CRo))Jr+Bo)Yf zDqj9X2JeeXJH}>wfLGx9oa&CKQ2dr419QbF?;U6sneDQ6WbvGO$Oz#%oaI%aB zb8Wko+8(*meYX&-BZ}w%GPS#tK}u*r$4+i)BHp-Um)Fq$MY;hmOvVRF3ji6=hD$Cj za}^VA(ZACOO1H*jV0AK)6ZArF!^==q3zJr+-AXlby|l(h#bukOU*a>nm^xS7u;pHxOB4VbtP6!MahG75FEm4nIRw- z_OSJ;;;JrKnvpbyuIYM|4-9pES(S0iq#e>B*4bHSlz?hkSy*kpX=>iFndQB{H8 z??BxkpG13ayP4Y6q4RwLlUi~Vv&-5QhThmZY^?|(hC(K1F>1sA+hOworl=-fhV>3N- z(^jtOj|buCz9Nk5_Lh`3H>&+pO9)rp4Wo_p_8n{Qk&m)x&9 z7T2S61c~bSgA|?A@4C4Zn762;!??V8gTd1?J+O8sbkYAHN+FHcHeL+&Ez+*%isfRz zt7Mqy9ZxiFKeq2Ii|hTi?31>;h~lkJ9GEqn@kpy{j`0m}|J5GFN%Z20LY?8}gww_` zThZD`j+v3hF{BmIRZmQnA=$gcvj~O)b}1Mn1Fc;_KX`%aQ$TB z)`27`49IGHi$>Qt#;|j7v-)9dT#aBsFO9spjXC4^BcVE|3AdaN)yPILl&5`8-k^$! z9PWpk>3;)D9Vpnz0EIyH&s0uHl4K|WFDhEpJBOCD8BI!{&;GAr#t{{#Do;rOVCT1J zp{VZLFkzq?h+bWWpso-W*|Ha07Q|gJw#PDi2l(Hp#a7!}NaVRiM%ar62kYDkuu2Qj zBB*y#&UvNupC+J!X&{cZX{aDx*}+ARin5nf*Q%`dPs*Obu_8rYtIbKK6ZewpjE&Ry5v{yKA_xO72l`r-C1y1zrTm;9P-=QY2)o&SQZ zzgpmTF6N?C$!L}hDj2nin674KI(*){a@J19dU^WDN?X{=V^U&~xGw zjw*>UisGxqIUnR>i`U+n!ddTXT#C;9LqMwn7G(LP5ahcUCks7w?uY87jly&ZJ?s7&U6=AU$2?Q04H_I&W3f4`3uy$yMJ0-m#pPje&Bx~%4eR%NF@1Q$#KbKM0j&s70kcE_2H@{w>zqY1Bc}%Fz ze=$4t*V)f;zYcn~4+h-Zc(uCS@DF0AtJ~M|rg*AYYb3COHO9nGpp!bF(PHnlO0Dt0}#AA|?)opc_t;(&*4Xu)cLLEMWHoQCj>T{CP*+szK*I<^2(&dO$cL&x>{M{Wa zp!494><|=UMLnWg^cn#PB}(+2ht$E7$95TE)WM564z(C=VdfKi)L|{Qsy@|4wg=~? 
zx>fBsEI_Hq$%7i)`GSXs;BMjZLo-s68frs3h3a)I;R!@8BN|es2$K5K00j$zeo+Fb zjqy8PMAujB4M}x`3_GFAT&=a7W_xU#%rgd`%H6o0fc~cc7Fk%mHA`U2|9_ZA!vP4u z@5j3AK)jPX8rt9pvLcZLj%do^!v8!frfP_usu6dHkYSO4j95SXH1G%5xcZ$nd2^G4 z(&i@DHo$2I8=$foes%7^70W&pc}9OfL>Hy_mla4|R$Bp%#QEAv#krN>fbYYz#YGE# zq`d80lwb4*#abm+vx;8IzRBwVKd_hHx5f>k~#>doIUH9SwK^C zVXULjuK0)bKB|xt+*^%Wv&g3Pn10BQ)@@Q3HAMTJ2yk5JMayY$-4n*0^yAPsmg8`( zTW_hlAA{F`v<}HHG)O9F;-EBfIquzog=x!){dH z#u+=LcvwwU8Bch%abAj4=elJh7W6g9s2qSh@s)QIdm+dvUH6-l{DBhPF<#up@aO+O zj&dFP_bygfkOGH?wX~J45<$$w_^e( ze%LGOn4%s8rWj``GKtW+L#!f4>>s z)uMD=O{t8DA3=HBM1BY1)qc(`cYR*5?y?Zu=HOZ5U-kU8mTsYuyKDZPEobiu4HQ0Y z*cRm8cT}c){gq$UbA8^G((S<7%U`L)=PmTrK!Nr`uw|k)T)+n_v7RfBRYAY0JG@1b z&U}F`)LqC1zYX|cgj-O?^u^|drkVM%VvQrynYvA_CC6DZN>UPNG zJIZnJX7APpd9Ld7+MEVCDPY^+3;1x|+>vrZSP%Fy=;{gh#?1?Y`4>2)$zn}%`?O&6 zpSQ|SU?i_9q6#BB@s6E%qIN4;LJlD|!0Ft4C;X35%l4A=m3#*mlB>ETs+&OIx&<@E zI$rW8OGN+fayG`HOIe5IF0eZ}Zw0f$xW4s)F+_=ky+*?`ZUplHhVA zDl23!wBM9XX^ZVOLCaE!!eYU88Gn!-Tqk6ZHGX>bjJ;8x$lJbM`NiN`m2TPBJs(HD zdFVWV$$6zFKy|qMa|)%v;*%g~Jar}51%Dk_&lFrg+18gg9iCbIjpZPW?k!I7{s(JYr8fNKE@C71yh z6^_NuXp>?7-*XI+TftBQDl?LraozQ0g5W$Ie8(qC3cIeh)DS6zb>lARbl+#odL;VT z3CMi_rk$#yp2{R4iUV0LEq=C`8wLabBSIN~=5$XWIT=C-#=yw%aAtx13@iNi9>sVK zy)9Hnn1l4C&E1Wupn5auW($4}#w*t7^Z&K1vLp-D^`CS$eh|v_i{Zk=4yvoB@@vSj zR)c5Os-!9nd|a6Tl1N=;WI!?6n%p!;4$UHfdnA~FW$>K9#4|wo%-Spn z7nZx5ebA6sp!@OeEQnm;!iyPTDvUhbp5_v#hJSR8d`UK@#uE@GjRiRgu732&a_O#R z_*&qf`MnIP$hbo?YjzaK*W;89rW9NUEEyB`sQ?@}*#K0mr4=%| z1aKq(#c|xDj`cV%A*;N;&I8x|QGU-64%2E|YFOzRifN272T$S7C!)B$geQbYgh#W> zre*r7JC}!U=xgmhN&jb;zIybDYnKl_nr#Z&kw?Ra@11?WwP|69^fd5tboX-Y(_?4r z!wTxd>orvNLjg#GDGS^U3+;?rn*uT4x8g}JRF{#+rdNKQ2^aSutTO7K)k@gSzQ%O*>!@V z<&WFn*mtsLw4rLP;4s5+BcOv#CXXhmYD98=a6ibQ@IX$wNQ1I|qzI>bbl+1JU)JPS z22d7ZKXy2V3Epsz9~T@h726LFnW`l;;&`}--E~2AZ?xCiBw&g^RcDs6HgU9`-o0`> zapp$-S+|2a3XLB}7pQ-bAAT=M4P(Yb{x9`LUwCb^%S>AWU~D#|u?DvD%MJx*_Snx3 zw4Aj+ZS5g@X)8zES5n4f+%_i7s;N6}`n_>|C4G5~%K?EpbL;p)e@m%3@o~<*i6M<$ z@7M3O^&~F{G$u^E;^dZqU&a^^w|MMQ-;3*3J0T%NG4`l)$8c?B7)z`RiVkK7?L>A~ zv({s;W&^T|Cyyy%cP7H!&F^d&%mfYvph&t?Cn6MLFP1MXwQCdv>An}7{B{k`;`Uo5 z?&!mt-u+#lwtX})iuyv#G6E7_Ah(Y4+o!U`{#j|Af@|k9A4&SwS1o(vkMjQBzOi#- z$HrfAhw$N+!}SkZo6Du}jnQI#YKs69?%25Cuy*$3g3(4GepV|uWZ{kF9FF)kS3maA zhOzkaqc!c!A!p0yWUHHiKQxm&6E0I^)eJ)p;Exg;?jGv^@ zKWL=Z)o4a4Xz**O%gG@^poGAN@N!GkdC2%On|sKPV@+!$eRdHb^s1K@Oq2c1_fXo_ zLJc7|(R_>;XU-n1v@iB6OuU&G0>ek=5DmA^7=S>5NtV!I+VrnsLU&GS1=cjN#ty)_AY6yq)u)>_OxD zt6qaAtv`R2;P`_SP%M-3e!c&L%+6g~!+U<(P{6O?B{y<1f42M%(88Cl;Jd`N!l&LZ z*>=Yet5ff99w7pf;vTX@l^vj&1ZX%oQWz}2Od&X;@HR}oz%fjIze}qS_R8LD0s`lV zb3I1;3f)_Nv{?B;TaSe>b@Gku{fxYJ|2Y1H#?Mnh_-A#r5|1uzceORHd?sfIGa?_rW|@YuVs3sp~4t`YoEtgzVEHK zj`I~QpB)jnLerYmrd!u9sh*N29n2C6B}t)~HO6_gci+KJ0N?gb$OWL`O6O9tU^5f_ z{dIHp`|P;fgIXZ6k07XqnhOw4K&h4%n()I6q;FaqxJ;NO$gnp$nAP;^8IoR`PyhEn z7yvX`0Y!N5lpD05a1SE51jU-SJDhUAoO|wyfwT|0JP6gFGIqN|;s)m$!Yv%Jf-PPs zg@0jl25{B63sJ0asjE=~=mH2R9WbdfI_UC>q5Lp0bMf}i$khLOeq=OGlk*odt>Ubu ziC)9JH?3l6tsJK3ud+;`mF<@JFWY6-tE7fomb&68OCX9b^i+*4J)9)|+-@<)k$Hzl zNWD$H+SHid>b`5J7E=vaztTU*gE{4Kkr#=cR@%SRI?G|I6LN&u>)!w)Rk z&v37y191G+auSS(psEA$W9A6~=rjap6ZH#$au)`#0q;4Q;(m}=0tvf&eB8YTec7?Y zUbMtF#rJ=lwO11pmp!mz`S*escdcjWq<;qe!6%xC-?`bBzcuZN0&&@+(@#VXtnofC zrp5L9e5U2a4GK3}(P^^^qKm|bly!rXReP4HYx2H*D+z6JHN`>JV?^;o7AqZB=reAW znIgdlWrOyLhmGz#V$xqm5)ZRifPWq;ywqcp9}hioz}Teiwrw_RF~)E0^#0~GzH-4; z_UXtQF}xDJ0|2ciLKz<`yNXAP6Mur}n3(&&gvwjwND@vw*y_z8es!8q;^uB-DDY&X;YYQOlq!$6uOxD#t!4FPhDiBnQ= z;x&@U3lLg+R6eKst-PW#g805Ax-Y=zOuI;C`qDhz!DK9Nb@$-Vn(sY)&shun#cQ*V zRha!+Tsk#{qIwh$31S|_O(uF#RzW6p5o^oHJBHj?>8pdvpjoiVbbH18hW&4R$x4cv 
zSOQze(R*WA`{B+*kBc#wW|^F~8vPK8`o@@FVEkgpnFf>?GMaor0~0uZo4yN1nzZ9I zwu?=$D}C%zkKg>|uGRp{yl*pOaWRv>W%Td0Ej)@`>q<)Z_xj2`0NLKx%#PIC8az#| z8oW8qXe1qRz8ZopSEgNj4-XbN@9bbdz%6ENM_Qk&%NUR}IZ+q0v7zx=dl?^mk~ zPyREpSXrzSmppF#D&;`ssAzdz{ilK}H98s;o>W}Ud8h?J#xx}cn@;D?R~5vlm-_`X=@f|(^Jqf>3@tM+me5UCQ`}n{2=P)0&hdx$q2F>750RQ{C!wEGCU3ga^BeaB6BdW84FjP!S;LZVhH`r^cFl*~aAbkpUcXR&m zc+nn97cM4Et&0weri1lS-V3rwEtb}?&2b+C^pBpM$cxQRsvUm+s$uAv(cu{REbeaM zaAU~c4xP&;+m<&!S9?O0kR+?J0r*k?*~*ccc|u85+e+!VPL!R`>~!Psn+di~^Nq}c zh2MetI>kccL-gXdnHBG^n#mLGpU)L|xA|#UC%Zrq3XHnQ&=r9C!QgdFWOgK*{ot8V zJZpqWps^nS@OPr^ljuEAtP zbp=rfHc{)RlB&EV;`#LW9iiCSFWDNo#zKF`sJ6nkc;q!J#Aas3sV-PoL6qOk(z-8z zi3yUmC7bGs5IktcaS@fD`vhOWVyj(=hSpmXw2_@Wz%*<;CheK0H&pK{7V@z1Z{1+C zAbi;@=jC9l@9UKb?IGd1*Vf$b>0cdo94+R1MXXgq^^_i7@nj>~`IEeaP2>U1uW)jM zCmH0w8~-3Dpllg;@5i#ki#7d_O1$K+!+j_5!|e$xzpm@!CXxeeSmBrOs4<*8^z?FT z^9b+q;ys1lKQI`s6#DbR$fR;eKS$+Q44Jk@ps+g*$^{`oObTG|(ct=+s5a;U$65w~ z2leyjS>?N9%xwhy?kagJt{nVbDng{%HTY{i^~PV2LKGO2Kb_(YiBh@d zFs6RR13XG~O?EEk)gYVnKp{skx!oE;_S1f}XP$;`BRb3T!*jj!cS5{F^2XP7mjgrK zj(8!vFZkZgcG|fwU3nX-omvwo>mnPa)S!V6+LR5?WZ87g`LjiSE%y!OxK znR9X0(^B&6rhG4(d?8*&c*R!Db{P_CrMzn4JGq=8BXn`7lK@lF;A@_!Gc6i+7$;!$?FxBKIsFG1s2~08=m^bXOoOOfTY04xg!ts*% zGWIR9Mfbr;2=YlKYPm)zLzg|lScVgjNVavx{}@O@E|pl#tIZn4o&U349?I_K(j*OT zg845NkozyObKHrzW0n`ojqUa+N^nvZ-vV%a5yEdl>IB*_X6*(%BA-)V_o73Eek7|hIdYH8PWYcH$R&$ zHA2eOGESi!yc{DKK@>ipfA6OT+oWo*np}+PXdzKjj)`#E6|hB08p(;fKNnm!_7#d+ z6O6gIML*}q_+<};?g=S>8eCI7al3eL4qZoRZ}DcQu% zXY5k>XrU!fKFlZ-nG8rzwT514TM4O!xZX5G^3V3qF8gc09gKLWGt%IKM&GRYQmw=Wo1 zV7ZlFg!!x|)~+l+T`JAyPLo-!*2a6a$$`wt-^$|`7RTl{#E-s< z>)jBJgSn*akp2n^Fbc&JXGM}{ajWNk01%jAt5G={m`R{C_=tL1)=Qtoz|5b!Fp>@3 z1jr`NoN72HD7uS@+!P(D&J2n}K%#7ZwmVy%jy!x>R8EhW2^pPTddim~>Na`L|A8A3end_{(STq=zu@{Cgd zK<*CZU6e8L^o|ng28m8*vm&BR{rwO63_penz?r2E&P%F zWU*JMMTV5N0FG87X{YgH3%wi8*PJ`uL^)E)&__W7y5OL`GuB3^-j~ZU3#{0Xs54*) zK@SiEw9W=}0)YHtRCg2FW(a)-)=TD4lW95!=(-U8-2xee6xMnJ0=&rbjmb;GdIz5@ z0d2ws`HPrp9PW`2yu@?zfA%aHe4{Cer(k*+qF<@!3}mZZ?A&Y*v0_q~K_=SO0GB~X z0aJw_HtAu^)&en2<3eUZX%^ozGY+F2F<77-AS&SB3_gPb3il3BHWbl_vSn-HhC_-V%byRT;myIKHS&SB)zlWb&~*_ zqgz3p#ecT9zZU#pYF11+)yjofX=$xcQCBXyKoC~b2cb(NGBjKblmU`&kpz-Dw zRfrpPM(H9^PjFPVezXSUe7bxG^AB8TmLPQhl7>3O;UPXKsR~+iGmIWrG*`7z)VJ{C zd^n!MgGDOkvFma@W~8sz#L}9*@;$}GmDco=S{HI9g@QvqHj1@86pC?L`+aFn2Re*j-r&Q3EFtTe!W#`U;0#qSXF7A z=jsW)G`-Zt?;(GuX|5M62Kf1xp50>A-Yjlr@+3}s;J|rNZxXLab$fV=Q?89r>`ZH; z1Q*TE9g}j08%rIuVug-^+YDM=u=J80QbY-K-u-XfV?Nk<=)ZXiN>S1czzVZ&eoFwV z2}iY>3aq@F11)|*oCiVQe@2qh!V{wk%Te70p-6y8b_S}JVr4*QD|$tNg) z&Gwbd0l#&;V(j__xbHO=C>w=l?2+pKuCb7bn{bxMg4@s{+(Xt`=7p+KkfL!GfdJK5 zi2d9JLHLf-#T{#0eYl??!-dI@A=F&Ao?$>MTYmJS&dV8;`AS_SabIQO^iq@5EjEuB zE`XM$IZIWC{8duIR(Ahiffmx^oC>Vd2_8NpYaUXaDF=yZIe?-q8?1YwJSiH zj(>7`@N~$LzdEXwzxA>KGl9K zU);!RHL6>(`ZVj`o75Q)T!`cRsb3ZS@oe1ZYv*guwRkP+22Q+f)`~crcIi%MiUFV} zm`)lWvtMeJ0C%SQZ7vzHBZ6eAgr+EYvy&jKWxQ4g0cOTTX2S4Nox!Gnqtoe($T8*= zf_>y+TuoR z`t~^}-Rc!Tm~4ML6eRl`1hrtU+O9a5Y=F*Puuk`CSEh*WbRy*4ozkz+qMT~2LhnYI zHhE99tl5NcU1~}4+y;S!-bPX8a%qqS-g3Mzq9t#*dlU5u+fs7S#L+{C^6*BCu|t?q z+I5_BAoNiB7RA*-fV7ReD&)$_ZuPet2IrVdO9-cQR&bbSJm-t}Esf`ob$4>s{(>``L zbI3#cu+7Gy*rc%1wVSXX`G#^| z9w(MWa)^?<<7Gn&yvB*V9C0Swj!y(R7<$RC1dB_JyrE@3O4Rs;Cfv5$YTS8QSuCCx zb;sje`qgvT`&Vymgd4;>>pA`%?S98Vb|Iv^CMZvXe)nej5wbzLrqSHl7#_673Zh+R zj;F(6KACgapw%gK0M3$ym8G8_5OqHZ-@>3_6Q2tYCd(D73g}?@a1}!__6Alu`sB{1 zW}s=SPCY+U?;{Tz+#wO_$q0v8fKr(VQpdLmu-V?HTLS)|)vLPmx#CW`I{Li403Yc# z6r6-EcoN7()~Ir}1j&AdAi+*Uk;;s&Pebjazi|tdUv%2FA$bD9%me7f=CAdyg%$H{ z%zDC2y;atF7X+S&wk9efD_~dM)--5SJ?{X=HM;1#d{x{sZV@^PYIfq4Yd0pr#L6xx zuK090r4jA_&#XoG+%CyLSY_hRzhH@p&^7g-1F#$Jr;Z~=z9549kCG}BbtX~ya!z{D 
zV9|o$ElDGH+xs#%^0;?H?wR>`bqyKF%^k+|XDB|@#0P@s6^bn%XmNTf(x?l;*A+)& zKf3A$TkiLwwXa&eI4yOleLlDgGRh!Ozv304effK^TVu=f*JDY^tr&w;>aEixYdTx= zcqi7Qj*xUZ%-%NYNYs%V3Tx3vwzy=y`RCdtjWj5R7W=rd7LM*eKS1(-FQ@TkFNf9B zV}(8nYj-B^*uH$9^49*qoL|O+?D9nF&zav+qk4_)hw@52+pqN=jWg`5wK=Lxt-W}_ zLbqg!a>}+j;t_XNZm0|^O_3g7aVgE(Q$vdTF*Wj|2kz@bl32!2)Y<7TB}M6em7b;0 zK~$|MwQApJW3jxf>FmBxUnUF^rPeJnZ|5^O^TtY0Nw1gx<7%dAbCp5=O5n7E`1Jet zOMadpisu|#KKP+%Rr$x^!IS)g=O5Ch7({d%R$2rA&xR$tjylscTg)puP}ysCPGg zXw9VbSFux3^Y(wjI=?u6?Jr+{pxuVU2uzruhSh8JDv$(%gVxVPtk|2h0}<%a^4}}L z_S}CEhGCTnu{xfJ1$Df157{%DcO5G4<;qg8nVsLk)>;kl%uv9?$}sB-ww6I{9o+3; zR&v1)kE}vQN*K7|jwBu()BUPO7mjo|l^vOGTPRH((egYnuYI;lE9j`ryU{*<;)k&S z#o@}2SN(r|ZSuFrkNs{Ow8hXe574D}wQrZ1I*wqi1kBxRjqIRRsFAgRJLYZ_HXh-5Xui)DSn5{1hSnbs znS%sczh-Jvtxe%bRyV+Z57ESmbfP@PGR0^Qekn!JWzB+bexB1Oe!fzcVY)6GmsDHZ zxbvE~b(+oJDRuIvY{N>ShvDr`t!Bk~ikI5f?|gemBD)l-@_u}^68U{O`B%pGv5AFV zeap;yKlGHes{_TAzWX1q(G9qz3fGS===*ge9XGI2;LB4Ea-H)T^!ALg;#NP6AJ>*i zdoOo=9;n*B{fX$qqz7W^xz8iTjg{1LQ$2tg$p)#svL-&Mqfqo>bc0iZhJegOm?i%W zav~qE2E>#8V_3Mz+`tVq{&(qFdx42-b_<=*>(ni^${>?QfC_La{N}l5`zZ3}0?jb_k zC$CjwjU1Oy!HFizqd-pUi(aMohdcrK&71ivz*g1U9 zYY@cp4r3c>CcvjyNW#rkWX1!@`aAyI7+vI`kDpe+T3Jutlpk>+JtO$+=sus?Rwwbp zUuuJjY$xLKXkt9&)Az2nsYIHH4s{&FnX{jMWb^RIUy*I6o@DdELVO=E+Rz=Gb_szN z&CCR8Q)$y3VT1IoS#4&3s%Vaw5ISGcdI9Su)>b&%bz?k8_Z(&E%Eut(Es3Pjb+;GI zBhJFX(QhYReqW1yk^Mn){gJrU%_K3Uvq=Y5Oc&e|1Trl%{~(Wz=QD76oPQ7=vCcb()Hu;(zlzjf8_zuop)4!?p2l?zR z;_vS*Mr#WQtos#FDyA%(Dc0AvV*6y+U$<;@VYcABI6krKf{ExUHv>c+d*kEbcfb|& zYI*1$-*fZ8;@KCiKUF7-3;XBwNu4XYC26m5<*U8QG8gqs&g9`Ln>|Cg{QQYH#fege zugc8Gi*kgI8-T+5kPs8gCJ7(C+%TLSNqkZz60q{hquD>SYlsB;7w#}y%`DYMg9tVE z&F~C_W=GGj3lU^|I4GS4TDLvrZbd=f3I=D3YQrf0Z%v9N=ko@1L#HYi9aTI8J}an) zRTq$1(5>izTg`#6%9)px{d>(jz8y-)pcZ83ooGw23x3-!wx_`Duj3=D*XWHSL7vZO z*meBgpTT{{f4nB5Sx>#pHvx)n(1ck3i6ZDmfTo00Y9w>!*{f~ui|`jW6#pQB05Lrq z0+HBJh{Svcme=TWYgJQ?zn#{F@sE#ag!@i%5t$WKWIWtY5~wP>yBXrQ7-tO;O56SR zlPgfPigBhn2?-$BdV=erceE(3<2-IZ?CM(c5or|f1xJdeKsW1eAdZnVrl-4ju7H-A zsb8GCRP2^EC_}vXGJfVwrssjmgioty511_H&IOZNeqP+@ZFx9`tH-}Z!?_Yi1tsuIPZACqp=X^tF=iVbfP}W|&M?L=jUp!%uY`>F#!+`kAmr3^W zRwR{;s?A!$$K)c|jc_@FBo~*nMFcfV&V^5g)h_lI4%fUl`5H(@imq2mOkd)Z(c=xWhmzS4%EiHSbfcU<%SXtR) zqt{sM7H&nMrSjd?+V(LK-$r|>huMv<)|`E7!hGK4+;mn+;eAE3IIg2yf9hx8Ii0+^ zldm&Rwi{cwocriAFp|2^J22%#+h^$D`C=l#LHW+F?FM>!OWxmt6KaQ>EzLW}CPIol zAJ9nk$?f~HlyIf2kKd|?0d!g+EFa<>m2KA*MExb(? zD9{`2l7)H(=-8t7tNT;p8BSn9ZKL*L36LoJaFvO!nfb{{jS+H#nO|X)C^f>!fbJUA zJ-l+Ievgm~`er+S?ENcJf2$^Fhk7O20tW-#2!r<|)0(31j&4OC>}*VOToKS6yqd05 zgB$K@1JJ-dyV8Gwtgj|z%uDEeQu@@Vmtfml3YYSy*o)r>+*?|S+l=Lu(O53LT;GE3 zUd<7$IAJ=%fy9dC!d$Nu_q4jMtQ+jV{@3STQPUuOya2cxvAk*yX(W{CkOa&JqZ%2I z03}(^N(n)~9;N|-Xe;P=2l6nihxvK5c4d>Hmw0@Eoug1^glSA@vE=CotJ6m^g1C{1j)*U{8 zcM}jm&LIf$lAIeV58n;iZ}E|T<-5LY#**^HEWD#_ioW@44Yzr`o*fxZ%8Tmxt528O z9HhAKN((G%@4_;hLS`NF=F32zXy$I_sP#h0sSV;EB%0lF#9>2ySLVp)ILjTU`V@}0 zMjmOIQ?5I;VK^WkvU_&thSBn^H!V>fzbq+Fz6axUitF#{?EGZ${*cbDZSMj&($ciQ zbJP`)+UavwyP5-9UKl@}?CZrJ9ShhsdzSubxiq8sO#DjKcBSsccG2OaCn8?}jExqv z!48tCfUtqAJfnF6^jPAeAOxi{(FpHhEEzC)s%%?9Rv(TT*}?T|mk|Q>LrA^)p;HeX zSOD8{dxe_DT=xnOieeyCGrk1l^fhiCBjy_mYOB)$3<-7fXVzIilJc|eHMarTWJAjcAblvCW<@{2Z7 z^;ysI=eXgIjm$1QfBhv|4QUl}7PSjU1j33KOmPsf$i@C6>B7#>FNQruKn{m`IpbMu zx}E8wImXZyK5~sUCTixs+N17r$mwEl!`cCG)Z4({u#Sm3t;3lwqq}u_%>2jaPf6E>y&@F;3{6t*rK@r&{k`l`zI< zwMy5y3v!}CiVMHWPG(08FM6?9elrPk*1JGsg3xw>^X=+gp%?GWoP6nk`J%zJ^tts; z2h8lQQ2FsAC!XHbQFH`J`S75-8DCHEq%@w02HZ_oO=sypr%2TNtsblo9rqr4b?WT< zzigZOUlz^|EoKb2QmF6SqbF7}4i|_1>a#psPztRs7PWenGscYpf&+QHBZ2bySE=Yz=S%5Jd}S05bt$bnLU}_Lx{b+9TF}5b;K*p}?Xc#zDFa?! 
zQ5qGl$8!nji^8$>VECHm8m;RZU}bE031WA;*9|3P-ZdGV)_BCtYUg6K3leDdxM?Q+ zr)O{0LiHbQtqOgP90i0y3n$%EaD}@dxC{D1Ej0{O1pYzvE33ZFPI1l!>wN2b@#&i1 zMz_D_)bH*4*CrZiQQm$U1?L1wV~NfjpnPqt;eX_#hm`3@zseUcpdDKmZE0zxs!u#Q z`HR(F5k`u-|X z5;I>88eN#SJihJq!kXLA%RtijE3d+e>H5oG$Ku!q?;o>cS%(U?gcLNmU^T{Fn8OqLD;R}GwEu&{B>)9Cx;GRtIU{9vQQ2w{s8=pn-2?y3xnrB4f|E% zJAB5?4v-c^g41TN3!OJ47+v6TV(D_`i2DMoCFpaY6bHy5{Z~Z-(FVB&1gQHS437@Z z6ML5WK66uuMX@1}dSVQg4yTvfpK9E>zzB>mY1HnQ1+e=*w=*oTP|EPI3vApzZ}>yt zoOSYKyI|u**LeCvZe@UYAd+8KhAqiG0WU| zA{vg7CD&*!ZRO-bDK6K|8p^L5KVx`>9h5<%NvnHPZ&q5n`0st3r8#7+vUu!nPimj} zgT(j61j&dG&R0pjZ#y6o>dp8LZs#kK-8*;imJ8q3l%T3fSRxI~=(no@>;Ps8s9O*q zJ0eqEBFra2I4>K2X^>g}*r}c0ifh((L}o$TL}mo)On0N0WTbfUiN0FjG4Sp%Uxsg-5I@}(dS+NG%GfxKf%t(a^5mlD8W03=1S)#a3Yf3!DnR8eu zzQ6^(dowIXmF54{Xu>?=DZ&*(BXf8Wd*jFrgDiLegNOJGC11Jo+B32(n;YSOi=`P3 zwfrr+9}pOuKu6!Yk$O;7?y(yF0va06DsBb}C0r&*hfhT5SfUM}^^!Te0so$rrKBQ> za=RR%UkDoLFF^xz)s+|7vjkWFa|K4HQc&rFQQg?&PG;?~xb6=N8(q9-dwOJTJ;imJ zSNblrJX0%f2nY=BLjihD!$WIvfVdfP(o1vDw2hq$PVq!>p*9~6N)fd?jN|#Fh{DCf zowPMAZNH7)J}XcGM=go*O1eoK+VVp=q+Oi%4^nAi+3D7<_CZNQ& zRA4S1irm4+L=M8L?l<~z5Eb_~O%Hv;Wc1T&p(;0?ZBhz!bP>VULuBb4TwJcy14Y`d zPm7Y_uGLzU@=}Pc*srg-5|{V#eb2%=js7y^Nq>+F2^0^DJ7+eEyuO^lpz3X{lu?lV zWg>4by#Nr*sUrD{I&)m zun?QWj(A%J17ci+dYcO*-0h3AMJ$XcvgyO|9q7s;^fqM}i~sAnY}C12aMyvaXDrc* zbiP)vg#!e~>s5wJ2Rc=H*YWJXxA*+;LR02_cRok8)<91-_^2aTrc$bTG{2zW zaf#g}_F*983Y%rg<~~>gXe(@f8ISB`MSE7bFuSn-5UCMrwdP2tN@PA6DhB}lX%OH* z$hY};MI)%qP6by!8k8WIwDQaWugdNJY-H1(TlD2k{y{{mwzmXbAa#rku4l%rluKoC zIceX%0-}bx&?Vj>@FJI2t`Qt;RUy#1e%(F3I9(5 z#*EOZ^qLLjJ^Tlz2E(`cYxfaa1)4)24H}SyGr{-@4Bb$ zl4LZs2F5B@s9q3SjY2?ka@>$-|6Wx=2+`5{KMWfD#L07uafX9PQ)!{I)11q{x7S9r`&pat}Ofa zF(>tSeMvenMkyIC2c5xth7#oBE#I_Le~e7iTar2!mV5nRUCDe@nRk2|e2`#G%0L{Y z@0ZKqy5*0QpMndor?Sx&xyA&>F9SnL?Kd9uX_F_b$w^5twxIwk&Ph+fKCheJD&NpIv+9jU5wz(-(^{hVBm z^7JK@Rwh@bP>cklTNksM1euEr@ah>QC?f9XxL9+DM&8ejTjc^qS2`C{sX}z*W9TL& zYa}@xC~fL_m`Bl##gux7bJ4y!-MpHr2++z%sOJC~KCLXKe?NlYbdS)B`5Oa%1zz@P z$(yJWTbu6sgRG@}^PO6C;9vCnF^-@CwRN*TL!Xh)Q5bjz=M2jGq1jvg)ogwNlpKpv zS9k??75MRrXpXB3LCpSAMb7e6*AR_8xYXb z9oenzgjHGjh}sR+=Ca6Jo95-tdTe>}%(n9N*lQDQv{!WDg{8AeqA)r10$MKqLFBC7 zE$NpnihbABUF(ZM=~v$EgJM>rT0v!%^nFCd3E|jy9p?i-HF%#n%K_dvzTleY)d{P- zOy5#k+@}>WoksD*KgjWG(O03nXdZMU5#x=}ca^PpssC$CR%sx{=qC&6ATxZ`6FOuG za1y<2f$OUQIcp(z^XoeQR_)-h%NyE-Ce@pB+>=+Zeju!R-RqlUE5dwCWq!T%g)J;H z6_v2YFn>7^0d!yP>5X0xEmPK+`<5#5Hjz4wWWhNA$aJSWM`R-*k>%Sh5cutu3u0E^ zx}{qk0v|`rCFm^Zn8+vd4rTitB%DMeSR6*ByP#=KGr9h23;{!6Vyc#qlg2tvMmQV& zuzqgZqh1Dz62hs zHtc%{Ns%pEwopPyl68huNV1hROH4>Ad-kmovJBaUA$ztac~G{pOtOYhWE*S9He`Rk z>*#sk@B7Zrm@{Y2IWu$4eP8?a|F5?i*B3Iq5r$T(FzPPpjJwRjK49+$7UbnX*7&}pa$IjTKziZ;)jm%glA%`Q$5x`t{ zMqRwQD1wN|^$v54QdlBc$}Fb&S>ZiC5OLr4q?=Z8Uv}q;<-Jo$4_pHD*E~Bo)^1sm zA>|eb(zqRxNc-8Npxi8^vyq~5;eZ%U3_=oTK`SDQ&=SYz9_6gt(618lIaezG;Ke{F zPt#MrD`p%qH{fHp`xOy0N+4ze=2I_$lnMO%9D9$oTHN2&V$`f%XMR zBh092Ohf1CP|>t>vHY=$Q~>zGITU%xh4Gjym4JzL64~c`1HN@&-(@{?ji~SOyzKA>FfmT>PXlnS zKwxF_1XnBHN2&Z#mE(c?Jk-H~4yK%7k3fDg{_=GXHqh0;fC9c6EHq6>0UJc65mFo! 
zvj7(OkKPAnR6R?-!C6BJi z1gQj-wrdcA-dted#z(KOE-Y+$J@Rtj95((T-D$9O^`_G&$L4VzN1Jqe6d#7c*XlpTvAy+WM|N6sIAbe4zufOoV9K zap>|9^Q@mPRUFJSH+CCe-3%!Fy^F(i_3bLNFD`HU@o9QF^-pZ>T!;c$4#Ei_4*=~> z7vTcEQh%4zfs=9IWZ*zBbVI+wO3&8uu-Z2=0q5yF>0Ss0IVJ`ir}U| z(wAe%!TXs>MB9QZSDQ}GgP#*k z^Z?8iKRlyi1xzcI?s^w zsL`hFF1$-)%X~Ou{p{5j;hg?)vftJ0oLxJZ0(+XTFw z5lGo}nw9Nw;GshDxhKXBZz`MPdt?$)n(dN#Jb}L836Hc`{hFUS z7X4F-?8_}i+MO<_&U^2$HeAYyOPTZNFvC0GdR|;OApP|4t;S1aj!w&YfUE^LKPO1O z9Y^Y@?%s*Im>CGSn=pR)8zTspQ+M{QXryrG1{S^$q>+(~6G|uuhk8M~Z~Vgd;Rl8o z8@|C45tsYaBJx}~;0#Ea*h8VrM$ID`l*9pPB?$@XbC8OI+DPbF5e6LqEqFf2d`N?4 zxdVXndDHYJv~){>;6({Q)uB~^Xa_wwO6|I$-Eb4c&^7P-?@L`)^-eq73C+%KdEQqVDyGGh_4gDdj8GMh%G^#;Fi3UiBy1r;s#thgO*S85gLIr~R zhk{OiiEVKv_&u=DJdz2e`GB;Lf&@XXv`N~bFwVp5dFeSfT*t_DJ?SM?D|1atyNm1( zC2Pq%jQaV^e zGZ;+}1Yk`$5prcDGX*xK3?iBl6Z?)Xn_LVix$EJ~UhEXtLlJ%6xGD${Mbz;wX&`-v zAwvwK8aSg}51|GCTwapcH>fwLKwCWp7)zQ^t`{`fK^a%awalzN6J9pkJu}rHlG#-{ zXZXPp+z(pqcdbJYyB-RaG!K=62%8189071=znxOe(cq?_e@@9;<;jl%nwV-u)Fly^ z@=6rC#aKXQ#62qG`02xUrF*CxigDtIU0X-5RgPI699a4C$09K^f2`FVYHno{YVeox z@|}PB>PX5a_smv3to6V=+!&pJ3gxQX2JLTr`A<#at`68M-wytl;k+IEG%Q! z@DU{U&CXhP+0+lfj+@u)>^ow#lhd+Z=_iHzAyY_T1+1?N=PBNRhY)#>N+tt-Hq;LT zcg_DSEEnMx$K1|Gj6irt18_;kUpMYQZx)&r zSyf=Z3{e3d6)2*8TrKKe|1d=BA>&}bRh-6_0!7_*M*X%qguDe+?jn900xZs%%5O?| z>Y!?fWq2g+z>Q@()GP$>@fSeyvAkX(^cVlYIH7|AN<&%zIYFMT-TpRSbu4Guxyn}jtTBL9)# zR!L(Z;IGuDXkeLT!HW1Ke~KYKp#tH}AXi>_SR_CMU2 z=B4m?)KeuzYMhe^%&kYktq6QG@MZ|QM4CMW8g9n|s8zzt zK!Pmgrv{TS8-)b+0xN|gEddBVHr^7_F64|!FLVoIb~UaFvp#c1mF>h~#?aR)7h1(w zpqheniT2P%c2%wef1ANY4`n(?M`#Q?q>lF~I!`Rus40Js^<3ZZm;~IK)TT7&tvaJf zV%t_!_N2U2X_wRICD(KlZ)ei7OXF9S3}1itF{4oS4oq(F7vb5h*JKWJ)A1efuVJ1`G#qcMjDd);nh_ zC!XM(3DW6A2C{?Q+e19DU=lSopJJ1Z$WnnoK8(hFOe3MCBY{*)fV2hs9ngh;(5t+D zuz!-f=IE2Apf{XJ(A}DJMc_QBR{=MApmcn7zDE+fu$SW6q4=Q4vwB#&+I>C!>%zz& zORl^Nu5o*Ks5EQFTY-4=)T;lb*7^SUki4EmyO6~Ak-YAu>FRWC3-!AV+V0cyS`vro zSh#Z~KzBIs@)f4JO%8?(&Uk#&`pkM%#p18Bis&39QMA z(LNBcWl|7maDWEhw(Ir1*PlNz2BgqzwI6H+s#2$WFP-TFe7-}{8BHh+N7X6rKyWA_ z@AWa)1d_Wq?{FGNhW0)}ZU%}X<*`lFsACi{?3dv7Ik)jY8?w82k>6hn73yq$`C+hy z>&MGn6EQLT;d=@s@a5F11G)tBc_-ND8E8DG*Q&e?))(QLIof!w8jDtRVf3Z2tN?GNlphMpH8WTWISMbNhy3g16etA_DCS=B#C^& zbC)*bfP6=mDv7cPvG1Yc)$hQWK%u3|%I7`jQu+iS5`Gqja3z$)qS_vVeF7D(pj(iW zfP&$(ibEd_iugqX9PEAW-;X6fGnJu?;iBqya^RSBmg&z4&dEa$Y;|JE>_xL4w#wp* z+dR_Fe|QIHvbY7yK2A*SIsW!RS6GZ3y*@^ohan;eNE6+*QuolayNG z{ZJsAAhR97=BspH-e%B9e&IyZCzsz*mAzVb0Xy>w@(XCw4$+4nv4bjqN9&4+bwjQp z@ndQ-xmUt^uRi8S9WF2yfSn*?h9GBD)<1p5Ux*WcD1G9ty&y0qU7O7@yBvth3+*kJ3&3Eix4}}=o zi3plB9m~IORDtDDT(R4p#y$J)m3xBMAyinGxuPY56Ml&|>!0E4*^4#C4lWa0>W)rZ z#;xwm%|j2nF z8!&i@p>m!- zU;9hGmp)PFe@U0lSOB^Ww9>b}!zxuBC&iGaKMyNd0a#|_L#n6Rjk*W|ISR$`SzXKS z&Gc&ll7iV+yWp=0DzuULy=sXVYQ5xVZkNIQSBo}ugPA7&6!HNqiV&WQw0070oafnP zoh!wyRIQUHI@kJtVH%*MGcrQKB|vru$M>hdnz&;H7TUHRzt9C4)YbG)Q_@OTBeCKN z0MrQdLB-yo_H!=W|4=U6j#Hs8;~`zi;?Xm&&o@e z_Ia+8K6S|3%o0VSOdfo(I^KNhpw>kwUu(eJap3-rdXNDTZ0xbfBl$=HbFb6cjQ1l0 z@mM5615QdfQ$9)sxK-FPGLWwm!*vWU2@E-^zU}9CZz8E9SSS_Y-#sL#bxJ^m#-8$9 z1iy&J4l^M^VULEezX$5-b*Nbb>mWtNCZQfQtY1(V@MIs<@{)=>7OevNfCEAk*u^iS zs~MyV)Fe-gKZb5w|1hZ8bh_2TH4t(;?k-25Q)Dh zpd6DD8){pEAZbylA>GSmTQY&D4yoejbAi*vM*_h(VESIoin@LPViyMvsGO69tKWEE z^|n+pR}&+OBk8!Uz-?aj?0->pe3PyVtu4FmTXy6<%RP*8LrGowp1Sg&<&_X$3y+CS zzmf(PzhI;++*j|3TiHo+U}7 zepv1wAy=1S$Nl;*)_D+)<11*(t%wlF zkG!Nxf}|fOQf78Bf7I$WRLcHcw%9ob{y&%~q%4=h0ZN+%jB^VPwU8sI=ko@ zTLH!6%ec0^Yn{m{+aveLt5Ih+hb?rbc>lbVbtj{bxIe`>iu>!de3@(c#O+R+EUB!T zi~6S_)uqGBjTANH+#nxv-`lg&uH<*+nsbSCgC}`eBkEJn8EY#enOQU`i_6z2hR38C|*3JUXtS;)9gI;UMN1wzkLCQJ}3XXj+y2lhZ6xDEM^ zPl|xBdiKJBivMqf>A(nE8lS7vsozt_xRcYAm%T=!r|}AJA@(n&kdSiEMnF~g?z8O! 
z;^z;i9KJ{aVR%f?OZqF#j|4(NM+edieVPjGo_eA}siLY#+pGPZO<1Fubcr8zpOdQz z@%B_~09dJ2jETS%$^leYm-xT@Qi-^jeUZ_$JUQ6VE+|(8Ts>@tF4fiXOT6qw4W8HX z*n8HzuKR{mZ}|=C&hJ*_O>FpfTGF*u)dChcz}L7>aZZjn}1QtVZ(om$~F2+`m`XX|93f{fXJvr zk0Hl@R_9^vKXE<*4rJ-x&S2j4ZOJLYobq|N@A_9Yh`zZfyr!y{KY8THR>%3=yBf&0B}4Y!xP zHhsVEM2cu{`mtI`liy7DUA(Jx%4QeMp$PtHP7PiM*RvHsQtFBD6)q)z`(^W8mak%I zwQ{Ye$7&9s4!hM>W2Jkz+U>Z{i}tcGI(Rb`+4->CQ~@-BR%wAL%9!K8HU-&pEQ~{2 zaNz-s+BB?tD!jJ+knP>jeNztXE3o?@Y(#m0MbMsSf~Q7lt`N6UNmQOI?Sf?E`?_Tk z!${uPzo-)p)9!0%rSkA+q~+?>Y17mfQb{k3`>g3zz+MAYd7BZ4!_>++N8%$dIbC_S z6UOcWO{15r8x6v29yc!V^V{EzKb>OldhxD6vxuI6;uE0IS(pD6dYAc)xF93cCAFJb ziOEzxwJlNs^zKVAFcV$@E5?yI1pyk4w$7dh8I^RXla!Bj&8R(xvv^42&fe9>Wmf$c zz7ESPlGcXtvY)qprZNfI2yFXwD7(8M`xCZqCO@E8Md4&MZFJWxaeZJMx`#C zF^A6iHxTeeDu&B(loIjus3+1;dIWgMVoc@i9NnYc5p6d1eLcB}?5__#lnL;c^$c7Afs{bSY1dp~~21s+~Ol1PUt z!3{iF{d&!|(LpM?CATch$~%Ffz{aEL9oXc4^9wY7 z2CEnak>|&O$i+bxNC_j`{w~m9`S^foAfl!z07C8WMuh0btPdz^ZDNw20Z)^aph-UB z44+iVk2Jb0Vi0Uh+>H^{7LwL%VT(UuD21T%5}xzd2OCB){1D&TXhL z-ZE135%$dSc1M|J=bpk62bQS*3$ib{r`fgK+p%XJ4Lg*$xU}*-=#d$Ws?u^9RwRYo z;9E%8sKKB;(f4I5vZ8A7;jvvl_)E^QKS}*_XudPPvNc=w;T`Gto-OvAi7H%d>!FCe zvX^A93hrs9_rOMdwefk|c({Ik`Dt+gQON(M8E#7CZD8%n0I)W3HRT19Vl^?QV$Gb3 zCVk1Aab_2dIGL(#oVo4-%L5haw!IL9OgV}3=66*?LBo3ZO!poDaF8w|p?0w)gYUn&K!|=3G6V z$n~pjjZG%?#sscg^2WrIq%+{fRLBKKC>w$KryEXpw4%_7C?yx#tgC2n6R@mTN$Jh z7BW5Oy%){9{y8^Cw8VZ}QU9`y&*|;7Gn(6V6eo7NZ+W+###*{Q8}jT4IRm2NqJwm3 ziED@mSG%p#f(TcMH170VO5^zXcI*=K+0b;#@&ZaJ>d;V#{6<~|h7B5Ek%W=yhLjG+ zeny&CxLCVRszSjA-Az%tyU5S~3sa4(ft?wc`;WjqeglxZpwnpr`vb45=;BU^pY}ag%E21FF~Vs59vhR4i>>^u=EH_m0g-B)J^yS@w28o6fT2+rA+vJ|nQ+3F`u25B$uk zmt4(sW{2z%hfyDH|B*`zyBZ5WC#G>w-zqyvwg@R4Ol)6q_E-PDyj!!F-Ej(9EMqq7 zBzfAsHpW=Oo8B+$JBgUR{n#%LDFB`8J;+lB{ZvKDw48!1QEQ^vSS_W!B-KmKF1&)r z+jH)j-p_vMnkce~>X+$}SI2#uc_>_#$E#zBYvC>I+fu|P+LaO8|DY|gCAHUv(QjRd z5}g4I2HPW-hB|SHO#mhOAU$q!w^)6l8sj^OGf7m#`X%|S#=CAj`yezb;lRpO^i`ny zsi;BM_<>v?66bX6I1S0no7*M;CR~r8u|0K9{yn!jLKXLZ`4S99jGJwF(#ngRAV_wf~ z`8y0vBUBX|bF?n~<2xTb%uSrAGC;B)`P|R0^mek^gA^t2E>qs-Ew}8No)_=dapdQ% zxiDw%F{ioUh7xx60?vpvw}$+fPEWq3G2fw z@2E8Bj?}YrcEvyijvO;oy92dGr8?Toi{mbAZG}iHQl%ztDjF!7*CnJKWc>*2rnV3kO^)6!3nWqsY)`G`%T$Q$)8A z+NM zeaH;Do0{+FbVXDObaSrcP~}j@!Lh*HL8%|4Nqw3pmc;(n*Lqzi!ZfyFZLnBRXK^f(+Lcz?$!NA-b@b4^hZ@`bHAT@53@%c{#Q;v z$>Z@LwGbsl=YRg;>hFeXt_`k@c~_{EJ(J< z`_S({BK*6y1(f=X({#%2NY*Cz_mxd$%LSI6w9XYKADYEBkiR;%T zFw#@WFJx1b(^JH)o#5ub${s(`YYKE>x!Iz$=9heN>LLKOQE( z*~IEt@}AMwoJ&er@jYYQx*K!Cnvrm%+3rM9-qv}AlAKHaO~b?(JPujqbc7(fcQ^BzqYG5UH5s#StCkW+ zgE7ifBFKBbcSM$>3$FlMoDm6Z_Or-t%Q5!LGr>9$ATODGgd1ag_?YPMoa}_<7%{yb zqH7`8ZzrC8JbUxiL(9PMSd< zEa>}CUA<8!w%~~wxj0i}k(Kxae{iA(cW`iM%kfreMd$Q*X{VQ;$D5h0MM-wa!&56u zzm-za+VbiW$4Ns@!^a~>IHQa^6eFe{%1;(-yR}AQQ^$uLyo$6Ll8dJ*CqpEXOR7pt zZqKGH5M$#;;VCusZG(0)^ngmqy_-!<^{x@aiZ2h4JvZ0-^VWu+oeP^^-`K3X^vg%F zX%$0SaE||g-jVNL@Kc6_?VR7+vtN7^m2Zu%f53io_mlDXE;ze9ZE?Yz)uTgO-i;iU z-15aqV-_@_J(wKM^D-&zYA-V0F~`#pLT{$3mSt*PECzCqV?p--l&|PpQX%6T&1lnT z&FuXl(HnjBDQ9owO%x!+l}Cv&<$tu+e<~*Z{EFr}8^L1cn1{vuoS7T!=>93+;cKPRw7qAS9`+F?xyy4+AT1yS^(A6n zQ2+yBOkNsaR!DXkMi0qV`@WsbD~yt^#2gQzpk|`Rq--&_7&1kxKTOvcYvr}n3@A?w ziG8b7kIXB81O^@k`y@7gf}#eBA2=D0G(Y3|2pn5dlia3^oD}(Kk6#^m3-d}sg8n?^ zK7cnxluA;U{2F1HQ$lg0N?Rx8kK^^-s%u|y{oWQvU$&y+ZG8mSxaG|}-*=Huy%5*T zHf%YYLg_28)epf_%F1Ni}XsHNL+)9?MC0!+L)Z zf&$5*ET$4Yta6$fRl0KcqNuJxMjQ-PP$Dp5&w_5yiICvqfU!FpjLZb~qY)C4%_jwO zkNLwZ){RfaRP)F_|0xOIivv+fk1QiJeXY9{taL&Z`4|!1;cErVA%x`+EQx{GeA0M! 
zUX-Y#3i>7vQ9I)d#70D6!TWbMp8Qo?$m#BZ{;`Y@;W=9X2f|Lx@R9#}CYz1M97;rI z9nK?;O2pCOISitb;EZY%IuT;O(Ic7piCX@O-77|~i9D*c<))!G>45i7b1q6IsNIuk z!rP*V6b&}r6zS)dvYu4QZy#LHyr-+h;@7PjbxBEyqisICAAXS053>>z6C;{(D0npO#nFvRY=nEAbWd%hlD@ia0@rYAv}% zdapmvNVg&?ZIU@tx!N6MzIZ=OE=exZ=E*7QTaAq)Z@PDA?|8{~Uajp&ZclwLU0^mb z!YADNaSfAhdvbQHgROir)pcJxbMP2-;2$@w^Jiwwq&$1Vlt`^P zbo=DJv&^x9M)IH3%1rXhMWat&9PA-Kcrhi_8V5)k$`!w@ZF!`}mUc^(h~Ku9uW_g! z`CWH%deu=l?HN~yY}@3Qf1=!ruy4=ys(np)A4IX=x~56;Y`&3iKNzKCQQ|!a2|opn&7e@3&=DjH zlp$}AOhR0~a=yRdk3`g#%L36dpl8LmR=J;CD1Tu$@kuI>D40j@OuthOr!MJR6-uYmm((qH#j=BAq_1eza=bW7cfO9R6StDrBST zr?{uVVu$GnVRScx#YV#nYiaeS*# zp%SsA(A=Ois)vsNqx5zBy_2G>`8Rum#b8%(9A4{ZA{|Xz!XLb^lhDayeqT<4TpcG)DdD4U?v+r?WJVA z&cjgH*$N(4gVeN}>tJgupERER<=3?8>U3V?6~V)og1zSCt#Zq50kS58x%91emsB2U zb7|vO<=^cXnmGxiwf7go9*uoLTPO>8mm18<)^<8F0QSWusoT5rQq8(grK^8p-+JD{ z3?S2hAJ$KNxyD;j1zg3K=Fo4gZST#!w9E#Rv1)zI&r(P3_xaMohL)6Nmxf#OXwDLO zx9{%l&gn_R4lK`}t(tUo8wm8izEWrqrWsw-A;Ok(A>1^A?If5b%h7qws(4N|JSTHZ zk3hHg9chz|ulIbddts!&tjz!;JtojFiz1~Lrl1sKewf^0v*}#f)L0VR_*nN)Sao{; zpda;CjDn-o>dxLnsC|Ko1Z-Z!BU#Is^_r+}3tN8ZOYy$EW^>tA|2W`mUP&8DG3N&& zowJQAeG1MlzjW*!c%wMhe){L;noF%!n_+l`))c?gd~XsKkV!_!656jn6n64YGEO^n zu1717Qlr{TA+`ml&X+S|Gh~9P>q7G4@sHjnl z)Xg{42x;elW!JYWq@e_^TOzp3|QobH^DEvBXX6w z80JRwJfav6Am-=-mA^ zeFbZS|6U-f5e`5!=P@jW$nyMu3K7ro4yd$O^!Tn2*|^>M&CG zeEliMmNyOuQy2Wt3GxWYxlZ^XN)L@g3j6Pq(XZjjSr3-)Q_Cv>C&<3b zs}`j_+VVPfg%|3uqbpOShV&OT$?fJLdgLyZ4A+nPSt009u?9`clhzv(8%!gJjM zpCc(L?i(PfrYSS+&?6SiW}Qy7Q2N8O;pM^mq1VT+;``j#q@U%{3f{6_ zAHzSprFKW97dHH6x4m%&^V?y2{iPxCe|7^tcMg8_?zBmVvO@AQICsjWo7^LVU$;8% zb<)qVYyH?g`ecyQy1teSh)q-HOY?}WDWER39thS#>@hcTs;X1VQKrLb!chiTv|01@ zGgDHIRW`h7g6k}P`XoHSE(!w0CpIR~{pL!}?O@H1bcVG0Z$L^P!*W2Zb?Kz(YyG#! z&Wqc=iLuwvFp8b^JF|8!>Ss!u^Yla;v3J`tBf|QWm1WX;4X)bnpn49+_~iSY&e*h{ zUL4rss%oF&Bb2(Bj~RE4T;-7pbAWu)np^^5cDcsVGTEK@eQ8%oxyq@%o!S$WOeTTYO%>*;vqkepOEOtXW`({TN#rm>GD9L}?WTO}sl z!&c5CCAxD*vyS_WCZ6j`QKyzg%Q8YHM?gk4GQ1%#(#`UgsG*3}gXf()+HE_kNb~}k zQA>!;$;V-4JPW!pY+&OFVfi4$Ct#K%2595_yT1hQCM|!=TEHBCLNhuy(@KN5@_!ZFzIJ$y2{Yo==c&(g$hNa6erTG^=Qo?Y2L4vQN*72LwP zrP?m-I#E{^o{}ddq;)8}W0fzOd;8I6;Uz%9I$j@3>+e|EOm77^F7bXMta+&ScsdX3 z#80d$btL8tCan7I`FkXHk<+>5U42J{)8_q@vzJy~rZzE_ir>8*)yKxCCw=k4DbPqq z_UC2rx9gmoD7ES)r$q5GSllW(R$YThT}_%sgN98ubD4E`IOJtO)g zbE#?4X47n|x7uxTdo)jCm)@JZ{_?)t>6a$AIs{q%VacP#3o!}XXl~y{58U9ec47U zI494~y-e9X8Zts^4Y;#L6@(Dc)ZSY(5c{qApWk zzsG<`TE*gpUM%+(Yt&BJ@^y4*|3)5;&D8P){86L!$@i;gn>sCJ6W%*Tou@lQ;l#># z#MZxg`B|D&X94&D9;0iuS-dN=IKtRS$P)mZ&Fkt+HS+|F*+lfV@x1kgZYBk-K7qD% zk)+UqwNnyucI$m;?i+bOu1u_3r1+fMv@qKQTG@t%LPE4IZDeRU9f6DUZ&3$|N@@}+ z7yxCV7XQN`MA*##Yl|8%aiB6l<-$xxy%sk`QNH#&Acb^kALP?J-sD+>;YrzCbf0~c zn8jB}-UK_2UaN4?xnKU1GR-ev=Phwt=cFyJc8yN~l;z8t%sIkKri750czfQu_rh0Ua;AewuYE2dW=wtSLK^mWwDdTV-MQ7OcPs*IQuA%=H#Q9s+ z-r#+)uKD?fK5G#eu6IugGUia`fF*lOL^Uo`lMr-+a5+a)+@kUJ4T5SMVyA29zkt9q=l)yq3Nzpl6hJ{OXpVGmdK8Tf*k?B7Djg6D6d|ZD)M9P>^77gAo}C7 zdY4|dOij<<+4ecAGphu(^DU4^#&(fwxn*NXn@Wp{S+l-M+wChrX!`Oi`TdsDu;a^# z)#|;F?2(@K))qeLmT4d5YcmxlMzaeO&WYIUwv`ok-HqYIn9jaKsgfU`Slm&jhShUtmYL#LPUdVE{j z-l?onE?k#O1JI724IR5ji#O4ogI!yT){#2vHPuSxTct)TiR;jbVlv>I-P6r1eib}8srq2|2w}7H)8IjsWAiXn z^RXe1#YM(7%s@|M1e-3-1ZzeYTn;x$Pv&aJpoK&@7>j{GHSUCd7~MPLxJ zjj93;rhCoDS1y+dcaz1H#6n_IiQU_lc_l~YR(VGf<_5{jz491#3GIRA%#fGv>SuK+ zfRmT25z;HCxQJR z!lUGg;@CPKJKEne@7H+_&9_c%N&%uuvZn`^`dJFDsQ+rC_xlzeUU_zXbvtklBwckTC3!`pm*p9}#fT?Z^`zjUkSO2HP6tDmM@2MZ^W3 z+WcLoV{*;0dt=kgIrZTEzSD-UbxqYM3>9p&TbDBjqEd29Pv{4Wc5RC4<^XmwmLxK; z@#O~c9}L{<{`zqk{bH7hoTD$|Fq(MImGYEh_w??qo1WD4y$5?~o&-snbNXfu-3^lH zx+ZH5oqq8CEL1zJNqDS}=gPsHE##3#j)#K{Uis#T(CJ6BW>bq$Ak#cL=Y*y{^GVZB 
zpW+|07BS|)j6Ne`d_wb+D!lIfZ#9QM-0%%S1e|3m(SF!-Ni_xs)Z>s_F-1hd4Snzn zL@fVbH#avU{Q5=Ezy_s{kI7JpHeugwK{#|GsI!A1BZq(qfpcxqn_Y#zM^# z8V#BTsT%RS(sSt7SU-Hn4yMm>VNY`mblbIGH6?a_cq8JxZEar2Anr#0db$rL1hHUi z00vTiTkFqWzwasre>C7ekEC%^v;ts#+%9=sxs<|3HgvYFV-mkE&W^5lWlgWH7`-Q3z-(_`^W)fENLv2#dn+B*Cuw=~yr1L257hyVSLSx{|5*Om!YE6V_wpt@_G)kZ zMB@$Tx47kp{O7*@SxsD6<<08Wxh68CPh6b}>X49uX(@7gc%GTr+NL8mzW?RZ2d$RU3I9Zy z9(g=^9(C#o|1e8qI5dM|jT?#f_Wj{abEF@D%C_v#zvjFsURz4<9QxI~!hq8Z8~-tr zHZn_$qX-6;sA^fl@t|6zI21SP;WAjEtSBz5sRd4>geEnDd`!1}$>PDhOh(Cn{i;xgf(II3* z5w#43X1#u^Ux^PIiXZcMeR~t=H`M4FTd% z)w~Olb6x4fzbMClQQB3%?RFi}CB~D9Z?yt`%@GINci&G}m2mnf5%*wMQi;#|Y0NjM z^eN2s;h0OG#g%skEGU0&@FV&smUqJDsBMnbmGue>b)7+5!4Ud&DF7;m%GAMXc}GIj zt;wHW=0`6o<0O?86IOmg&S~N2hA&?BtuyKUpLBc|QHOqZ$v2(&0B6a^O|)-nkf~`F z&Yew7jxurkX91qj+6n(az9@a!^E=tg+jG?w#|K6~*?FqXsgu63hT9#H6N!1aQx+KG zl?9?f>lyPhQ)_2O8CT<0-dpVs+&^7SucnDkojXqv%)j5X#!9OUnDYCp3B8Hm{z^$+17gp1>pN>hh zFyMNfrc((WPjaliu_lF4T&fioLt?BEs>PQJGVM!YoYBUM#@qv)kYfcpV_$~|iRvG- z=TI>y0vnGFRFk(_lpEU@H(D+wHOAtIeM0M>j zvkSW%LB7$i$gM-d%l#dp9~V`nQ!xFWcz+4V4?Y`@EXDNy5MGwyxx34`C##J&7fh5q z{$@^r#p^*V(YJmnaF>9WyJTg@ujm!3@7TK4v@3Q?5A);0)bPJKM zIsA&^Nb3WgA+m0YOwyPM(PpMif+8!fK1Xm#R3Cf`s`+>OFJMPF=@PO(91Aj;+SC&S z2qU|pRDWQOYDzRxX>n5(kv(C51r-?Th_Kq1LjMUP)_j9}eV8}a26R*|Eho^ibf`Qoh5^FZh?|m=dRE?SExkg;}-eLW+4Y2sH6zWjk%nM@!k)RnY$hUsX zepEA_t=OG>OpF4gn>akE!P=NHD5AZopAkdbf!WW;0 zpQQB)>4nM3(bD4u^8+?45b9c7=^T8vRk{3j{fMw%di74C=7$MChOLSe&(KOI{a}Wnw{RoZg?qxY{~-w-%^k?W_2g=Yeh@IOm)x_;DN}^{_3*p z0W?Y55_N0W|CH^OF3nY3`+?=#HS_XGT<`O=@VN9rAri=^IX0O8vvDY{bw_UYeM!Tv{RC z@{yZ+8FGWSo9x_YA$fy6FDmK=_Xpd;bUlX3A}!gJ%4x$NwpK-b1R1eQ*9=4pO`z`5 z&eT4;|AA<=ve9golFjV$YRbB&(g&iY*dpdTrpeOIVw!7u^ehC^@gIF;c`yw;Kuo^^psDc2EwgOT5b#O3C`_}`Befie;q715yp?~l-fubQp&b&vG@24Cv`u2l~OY5>jU?N7#9@A=PYw_B6jc zS?wpuR&5e_eto$&bf7#xl240P;^`&UyR=6lv4HMQv^pNuMxg&3gB3r-74b}==#KQa z{LU}Z->~O|4XRjP6ZHh#F*-up1!!Y|X}U?g<-0%k6qlE)@9(Okzx^VOJLC1xeTvgK zj`X6>v#A{Ec`?=Fm1}mtE)UaVzrB#^=TY!8QAdyAq~Q51*pw#bQrjU5OZ?m+e# z;s{XzSLQ**L(CyZ2#NL!*F#(ZjD$^4 z5Jc~SMAaB2Z9s%+G%JINfyIRZn~EwL6umYodxDP)>8`29dM$01(7fAog0(* zyk?K@Gh>q@Ta&d)&iHG5d!=e#6?^}OsdoW{I&I&_%VvAiW=p0hYPYh?h!Ru8c92xF zh%qym9Ey;cHX%~!xTzdss~IwjB7-r5b(kE|8M8GGt+Q-$ND>ttZQHiL>lwS>|Nq@y zhcRZxe4gjNuj{(6`+my*nKZ|I)NkR(y9Wjy{ax}Yaxi(T&f`ER_Ma4hrAiTz~@ z4h~j@H1?nQxUb=m%-&ERr#I=n9deJc)XnnUyRY8Y%o4|zgf!60uAHhEu6^FG?CfvX zk378j#WJ6%bx+fQHAR)Od|yqtY+Zd{nbeY7t!w0*GZQt;vh2KD`9-VFYSgCcAFL#jd>tbcHSyEpRiu7T!-8*0Z^oOLMu z5b$ZqIw{kfd!}jP;L#21)MofqUu=-a&ufs~@%3f%S`&VY{r1++9p`p0@j7YpWJ`?I z^oxU+T>Q!wEL&(ZSdqAOf$55Z@>&;j{nHKstB=@2p_^S2`Sz`OXRcxcMYesDcj}Pd z0sli^JGKUfys^2l>QYnVo^6R28siVly1NV(vbSU!-T!`CdaP)*)*P*+o9%wJBc0N7 z=g!JLzjLPN7K&10Q}F$QRXOj@Ri%x6{MzTpf(7mt9|HcF)V?}FK&%FS3n#g)s4*q`#aH2H5w6j$UzSs5+|h&g?vE{e@KhPK~L1qU9QDA8)4zRw8XQ} zGConezCNqE(K%-~!(>~uhNls;*EU%zWKNS=ih7pNRQr6ghIaW)UDvAyOQa!2Cba46 zO%0Z8JGrBXYPrjt|M#rM89#oWIrio#eW0d4@Rayvzp{5=KrxVVyV=UD$~|lK`*zdv z3yDE|E6)D-(9{Fr^*dr}nyt*st9yoj(W@Mu-Mp`1cdhwd=;~Il=AKPjom`k`cqy{T z5*nf9K$2$X{fd{WB7DT|}~jujRn|7d7?$@78puYjVdvUJCY^ zIya^z!Mj0o_2|cQ%1(u6`pOVxd(GL%KR20v3QtZ<4Sf}t;HvqjG~VF{MqOE4LRiu- zbwQ>ZW6b4s=KnjJ&uV&l@vdWuxzfGON|}vmMg2eCJy6u1pve93^(K9X(8FKPwfw6) zr>JWG!uc+u9s#-YCg1|H~Xqkvu5K6%bEf|Y*vqH=;9eu{jt944;)FL z8@)boBjiP;exIj&wCZNC;k1#bW6$G{k^MFGf#2fYjV6Ac?u{=(y5g#z-;Mk*{;qE@ zZfmf9piWaiBlp7UxiiFzjUH@YcI%tAwfy+!c_!zVEZS^;4(%3NIB`7TK>z(~A?xHZ5n%6kt8>ffPcrZJBvewpoSc+5!V+jHBD1tjvH^cD)2QsST+lh5dpn;zr6K}Vs z9D{|^eT_yDQPxJ}YnZ|{PBpgT4Mna4L6S92O14vt>yr`Dg@GVvsp|mM^$C9DEZ%ya z+-*Y<(}1?jmn1|31c-aDugAu8#BV6_V2{`vrNKpGq(Z7>26W@su1tI){GYje 
zj?C9^XX*xv_@ipFKHYntPlvC0+;ijWTcLZ~y8k#JZ?p-&Td40Bayl?ArAF4`8v?J@ zp_-VQs`@Wuo^sV`iGTOs3I|a{%7w7o77c$f%?{C5BtljYafMq_9uiW ze^(sQE7J?me{)3dV#+|XSu$1WVp@@KsBoc81w8B93UvL;auRh0D*MwzBnQhw!Xghe zF7wd~c@%kl%fN7d!q)kQg^A6Yx_7Ux=uMixzh3X@{CfTx_xp6MA;l)$KChw>rR6?_ zL*T(Z7WEG?mvQsYsqr;+zW%b>H##XFgDEk==r^b2XqGWLl!clu&jD}WdiOtDY^f|6 zimW>KF?j34;lkv1pC4InD;XLc^r?9Feciz5+ehBf0bXx>GRFQiwTY_a_IjrdxIbui zIg>=!!8m@|d+zue=Fo;Gm6zsyf@Vw1cm4^YsGJ|U>jYU-cS5_3;$4q5K%JJ{{wc?Q z{K1I5S_JnNe(Jkiv*D3WOM>3#kdf;aujZoSZ)y{Nq@z8KYrozxX3mn$nJ{79PB|yr zk>e0(I#=JaYL6@;Y3xPMSaAXu06$$6KKZ-ZZo2lz(6er5vEJ9BB8Ke}Zose$qH^um zp($X6+VHPSaOO$OZ7=f+lu-;GV_cff_*t5}MoVM_Cz9Fnf;5|OqXFfS1^aW-%nE;b zqDVeD)abJ|X|>&u9yg}rSRL_I9#gYBt~Kp{Gf#X|wA~hMUMHkllgclwrks;S{v9-M zV_ER&1z$5)t$6k^YNcg^#;eqnEa|?>1TkhsGo~|dsEci3u1CZ{&)9!vD?^uzwk=M5V|r1U9;Ph& zD)dROjk2{y;c5QY!AMK~g&UR?fu^7I-rc1hwCqnP_fKB7NiXzKh|+M6<)Ei|x_1KE zS%T*a(k-8iRi=c#3sHO@`DbI`5mWBiLFBPHdWJCvBZ~)<@c4qhZl2Y^NvB6)Klcl) zL)ucYl>?-+6MCy!>qC`OJ=-17xzS8-re7S%+dbKpVV+$S=8?*UEi*<@infDa7A*5= zerQ+t`KmF)2GFbS_Q-yGL_JSF;o!gNq0)M@ahH#vE z6=S1Qu5|44*B3U&DH^Q!3kv3q6)TduZ>+Aq^#WFv@8;Fi258Ym=jJ=G^QxtHGOKrQ zJkxBHIceH@BZ7)1eHn$pCKo2vaLPn1Df9H5rqg#K-0#dHYe;@VylAaj*-6cVuMe(h zs7n2u?quFs-#d^{-T<>!?Ui4v`Fp>aRENX&)F;|zj_sMe^2^(k<^}qenCH&#Ph2SN z);Aj&i?Z=78mKxH<~c|SZ`iW_koM>@-*h!08c4CB)JI%zyA^F!x2B95K*)6#PZ@C= zpIh9*__x1>bOHGEc-QOyr1@|nD^@ECbS z`nQCSZC3mZFXa?5Jd^}<&eeeTDoXthzMdY9+tR-%`d4%&0HqcD%oF>ba|1W4fu2!) z)ZWt!RlkXH5?9s8gX6>8{`~Jj$ZjsTUj%Ji;u5yG8bAAI(z?n&Zr=Vpx34N8ctg_R zCAGa(ZTt0}T0FaNq}&sxdvWCP-0#CK4m?gMe`&R~t6;?-hD9hAz{$02t6pe(T=KHO zuXBo;BUYV!4GWm#Qe`Rlys|5QhX*eEvOi!+`8hF+pKdiXF z-*kMSK({gIpk-0v{Iz8{ar63@ZJOhLF=;GmMFLmvsr3&zv+f$9;l@2RVmP(PJ=H+= z2geXPttUWGc!T59iUv&>nI#@vxgTaKMlghzg< z8Eo(v9Sa?L-StTEi}vw$D}(J~?ro-c2k$-D?qZ!2IsP&Or?bUXL7V@sysv}yO>W}KKbbI#=1zS%W(ThefNV;zo%FcbVJ+uvVw zKP2d}m6@e6(8{@S&Na#jAyu90!fT4AF&QXVdEeXUT{E#fxBf3JY5AyrY@{Z+Vfi`g zx)Ik~rG=y77A$jwpPRS5;8MVEtUpBgyaDmY`oB-Ji`(Ju{i2xV4KPMtI*#8`0d39y zzMnmD^RW@mlo8y^ZbUwjou)ZPY9M9Z_*bN$>JaUs@Ysk@Q|U9gBUk9B%_ya@GhB-{ zGa`&iN1UadMohtZxGYL(%xF_>T9b-}8L`D0EZhbH#&9N7O+~K+)V)~-)Qw^WC1?w+ z={)(pETO76>?~G2N>&_$LUWouP8ZqJ*jbrwA3peey43si^w(?b%@Uf1lt{`<*HeBsmFq?#}_TxAuFl2~*yjYZ7|@0N~#q=G&04kj58}<5u@3 zn|2Hq=w8?JeAhql{%wf={T*HOr0?|2benfq`0ID|Sv8oJn_C9)O1zbuY>K*a^xdd6 zU36Kfa;|4v(ZyPBK0A{oHaJ8PjdSHRIp4ywO=?bdQpLbe2lAg*9VxV5(N(sWINpWru*A;@6L_BG`MdZ+St z^((!K{@U@h?bPINzMXxUXSlog+Q&&Vr={2GC>_6Wzi_L~YG8&R44vRfhz-->u1@7b z;Mu;fK5&CacGV2#H;ke)ak*N@*V8bR2OW}vTsgw-Vq5kDp@qq!MJTNH)N{{>=3pa% z{-J$6rZcxXKp=n}hbh~U2_0rsNyjZ^35%14vzIFqhBw!i#TO5hKe)AG?+iucq{+)+ z{daseU2XSdXZK&`#n%=1dBjVzmyFWm>r7Ys&5m;n5ZnKSg-uqtV078e%cZ19NmvJ@ zx0@&#Vmd}AzlTP89;_+oUnV`w$l!BSIir$giet((&=kSN`-Oa@fuJvCBITS&OLk^v z`ORbsPYALoLBvAjoa6f|OT1ldtWLE}OEM!#NsN0l#%cfkE*`?a@FbOlt)&9+9%J4?&KzoAK%THDg*PdEd)S;ZG z^C&E}BD@a3t>MrQ&5FgzCXs(0z8^TXw|s;8+P@-GGg4Z%7*eJY!0uD*Y=(Dsw3*uoPbbz&m)!XDEMm=c}=>!;jtS>94!{A7s0BzO#ucSQ|EU z;E-P%h~fBOo(HUIXSg{j-5F`I>z(e|<{Ontb!>60+-amB(CnMHh+f5vy?#DjBLdg& z+!-`SK%&BX47}GEa78=pQLv7GBRWqef3srxWN`k;vNh4Q7LZ%-IzFvVMOW$UQwoQ+ zHT3+!?*g;&VHY`R96=N-F#u*%dJltV3r886<24v)WE68@kr8AW7`q(z7?InFHx$E6 zL+z1v`c1YraS;_AON*U`uhZ-8r+K`P>Ikuf;@dJU;@M{&a*QMIn6Y;1OqP+3XSF>M zQ-VjP9osocz4}~LK%hN6zVre8m?U=&7JL+}IO8-dH3RPHorNdzarU`{S?DWeu*5W` zkjfBb`KmsNnF0xe&1;b`SXkUt#yC?PH5~`4lcj0ggA6?#OWO?565sbJ_9^T^Li1tlEwt4-pFdm_Lh{ZFJf`nA2 z&`+y`A&{~pc>cXPxGeD+5k~swqrD;SamNX5%Z)H{Ez(gv7k-eg!Nj#9J6LrW{AJ>! 
z=hMzqi|?dsq{UJW5fKmvP)a*8TrpdPNQ6c#q%GBrxaP0{rp0QHIOha@I2Bel?xQ&O zXE*oa`xt)Mfca2-j`ItJ-Umy)DYB>ozOOUsm`h2OW&;KlK7hCjYu;h$6f{u;pO+*ntn8Vak#+y)aew z`CSp0G@ROK+IcRvx-rnn@@~@6J)b<<6EEDb8cx8MNIZjIZ<^z|?^S%!$DtV6!2GUA zWvap?AR(;o+V*OaKB7*55F(VI)r~EekxEQC1>s}1Xu{t$A>^``>d$nKc zT?l$%dSz$(tKWTZc`?0lQ=<2)saA!_AG}Y0_+cdSK-AhFpB-4UAW_oXqQnfQwGj4b zrEdclp=gm78`o)!O0Z5$C;yA@lgx010MP&YhgOS1)mvD?5mT9BnjqhPG45n1y;XyW zK-_5VK-Ft=(xU6jwuXRxvaBTPmF zjV)%dTJoCLtIkq&#V4;V3?3uGaXRqOF2*h{{-a*bzD(n(36BRki#HUx@IY)vN4@tV2*8)!ppB?c}Mp#%X_*QXT=M ztDSqDY#wF?DK6oM%wG-Mk7cd*KwJ8A20LD>s5599*8h6fqK)~1OR|KA40cL8TJpI( z2GcBsk(P9xMgAackuB6tssSz?gz)oe=V5sDpuIOXAm1dZKpNG+T5vusb_=@4a>5f* zJYL?Mr#e3P(k!RJYS)S}6K~ZgiuJi>+J`Mef`;Ii@j2U$o{seEdp$4am(l+8ZDi5g z$dlcccGMLto^9F*_3Xe_)0B8ChwFMFjp0gFUa<;H${11!Yfjzimc8IGUIX}^a#vz= z-NsCDp{6le8h5IE@B-Nud+$fQ3;AuJ;DCRQw+;U*C#Q<|cD>g~Pv(^aD~2yV?H9aw zoUpw2mz&z55w#u{_uWp3uJtpI+dSy~;=R`1hlAh73{?i=gPbiY*j3##q&Q!)nJ35) znVV$`C-E%;c7({n?lsR-SnUhexi}yT)_baNR<(E7uRPg%VeqEJBRmG)v9 z&4RFnYo9M~V}8^N|M>W8a!GwNOpv^6&`^^;Fi8o3>h1I!5$AO*dX$G(q#4)A@?2^l#2#vii zFe)y$I!+e}IZ`^eEr>*7;b)EOBsfWdHw3DGX=fC71cfKXi?zgDjK`nmTKvGN+Skxu zw#e1lz)RhNLZ8f8Jjx)?EQt4~Mevpzf1kAcjQt_g%K^vN&#Sdq6thk$(XcnF_gc^3 zX}CHwMJEI;+S0;lOyMX~G|Cin@)^;N#%!7?kGB!CEG}oCxLm5Si<&z}bS(QaNHL-} zjgSj9$YNfRz&W77QmD5#VDIp*A^#vu-Loz9#j)AWh~Bf(Yre_xjpHK3`o@4ZP&=}; zM_htBCL*tiH7Yk)s|dm%fO94Sb|>E2|CTVqc9CtNKlwNjr&T9;cVr-kf&+XV>B;R8?=KB>^|d38z6rIS$ImGeE{eqq3OIV|g|KY}rY5 z&Y;?-LALP!HL5B8tpVR zmhU}eNs&FlN#vh=`kcN_k=1f*cT~Kt4+~eAaxbh7*1;_Qv9yoAk>LuHuoNuj75#$O zZ$lLNe}$z)^d}rD&>gr=wnF&*$b**U{8K)`TNmb->;<&y=_hoaY>z--E2XF5)$RgQ zZuRJJe1%Ft5UloFJ!sZhwS(BNJTPksndyq4Z&*|pP~6Qk&wgVkWiN8O)3l_I74M{9 z&byM@H>~)1(DJHPU1+zd0Y_$Eqt0xofsAMW8)mx3B!9B z{9CTJ?Dzmi1eNMq;?iuapR~M51+k~@lo4o>v|LjOxrpjQd#+Y+5&)^-Bo0TSRUa*t zaK0tEyuZt#nm{AHYf-O55Z6sx$VWw+KSqwHt$C;?4vi~4^28X;8*h%EC z1%GKleH70R|ElVB{6}ickZt4>8xFLFY{GmyF+z$)sp>b0Yz>#n879e{5^QKh2m>c2 zE3mkBF+Gn=145r$TWCNCuS_JQHDwC5WPUvo&J8WZmb9~x)XgWv7R+d_C|c@Bq2|(B zNiU(B?==#(vfH0k=HuH%JIb-{gn{KD;~e) zH5YRjazm|xAhwj5CpX00zIm!Dgu3_(@Lu$M)8FbN#HkqJ7jUR_;z}rWA~lsd(>e-A zRg}j47K3HXwhv;8xdMp`GRws3V2Ps)XzjOg0fp%`fIzNAOzaK2krux!8S@)iYZD7= z75`bA*o_Dw=sMLv?jzs(2=V*bh&Z=bQAQOOXT zp^-n8q_}9+QU<36ky67ZLH+=|BS}MzrCPRl!9?GJ@an9RnVxvp9Q_$z{U=pH4UX%` zON))Bu}6fc?K9kNGx)bTVj>s*i$%dFNd6M4^kZuZVztnk0!?f7zY?#{dcs>!i6NW? 
zur|WI8e~SDru~3@2At@OT@a1^J2Qq+DF_g1h_suU#2h)c$l#L9LVw~e72}xL=q!+T z?OeQaq(v8ueF8D7L=qP)#$!chI;1WO6bW#63y+O@}iYo$zNniQXS7*&k`ufvda~$|Mj;(pV z!m3vmtdlgBvmi%ns0PO>w0>+XyIK3!nwx0bO+8xav*C2}y9G1GBQGSHmVdO8Ince# zT{rTwlZ5(g~#T5p;W+>a>WcLg0_%`JtGDq98uBS6A0q=2B`LFLAxRJveQpX z1yGdhA=)6?7ow~oA-mvAH3Vr2_6GA@;z@E(H>PXxE-@$pivj+P)e|7p%GVI`d&D$6 zy@V;=7R~UaF?+!x#!(fKWXRfhs{^u=>9Ur*R@IAPW0==2?m7HPIwVbK=PvdAHkZL} zq0l%Qr84d8%Mv!#kz&LYX;l6Uq=@x3!scKPIHQ!r+bG^C21(B7kO{*a2`^#&&e$b& z)Xt7&wcuT#H9NDAXpo|4t$9Ki&;%F=2?{^K?Qa9;3HL$7(w_K-civc?TX{l27Cat8 zKx?*Tw`4NS{?;nE%-F@qSdSCBvQI!Yu+IzPwYFq9ZWgrUwLD}hRwwCS@lW}tmQo>E;~KK$1Yl7|LZ`cY-d-iwJN z$=hBlhLrCf>scKxnRUJFt8k?i?IneHAsEJ9)+`C`CKnWG=pzVz4bMdjnoc{djs##svD4ZCGyKRpwhfTvYJ97sGd-Ow+rzLnrKDUYLPd-BHzhpYz zoC?%5uwv+}pVqT(GxMt$XnUlqwn(wYpgFOtY^vWV?(>CLvNwqRqVxZT38w>ANVJfzir z>&Df(4CBLbW3^TpU>TarR4#BMeNjB4eoM6{X^H{7L#Yv*Yh#O$N_JfSb_oXx2$)8^ zi>UIvphftgR)t?|aDS;E=J`R-Z~=aj)IWQEM1ANV zdmG`a{$%;mav%|IuLi{YYXLBi`D60ey#{#5Qtn+uLA@&c>~Dk1UqSPAk8$T)p|qZe zOrRI4NZ#EBktqC(&>Kl3byezl^W+~K2baXVRL1E988x`(%N0hwNry1V(Z3<=%;Wu@ zeVBieV_Fek;eW}rDpb?T>HQ1ET)h=(b=Ufx!ro&W2pUbRRJaLNmwnwU2Td<_`z1yF z5am_=eXz9Ou;i!V$OTsl=cgPhzq!8eV`K?Z!~3Tt>^ohP(50Yw0p7Sq*fLp0ZoRI!+Gn?q7l>xh7RJtY;@`MRF+Ha>;> z$k>=8WUx}%C>rB6B9Uwdtp=7Sg$bEoAx7y?nQ}YXd*dEF27{v!K`R9)g!as^Urf)f zp552=#?5$%Z-$7L0cvH^@pE_Abq-zfRF!i^d`t;G~|RWXrDr9_E>Ak~Qn zE<%Fi+`NTaBC_z*otGh|63L-dOQ3mg9VVziv_Op?P-~sUwjh*|kBWTfe~DXFn?F+x zGmYqHNg**pWM7uL*}ATG!<#IS#W`*ks%J+_GenqUxpsshoD0ia;R(4a>1&-k#s)!B z0mMZX6kLVR;*I_wQjpnlCr#cGT~mKFU|>FWN_o`x^DcCK`DW1LZ_CCNuWA$*%uO#o ztybnhEpi;ncE7)^*8SqhkiymTpGjUtcik^teKVjFQLEFSZ+12$`HEh6qvb$X+=Wx_ zuiplI{-aDUXMtCV=N}kRjKq?I0URsM&z99BTh+9$G}C)s)0I%7^KoC}>6M0{(sYmm zY@x*q=O0cvT^q3&)jJ7MjRM{G`5Q*x?hilGo&FVHp?opiS6k)zx~)6G>Rd9l!N1+i zZ}tU`+Iat8vO{7A&6U~coL+s}vv#b(OW&+vTkTlNUwlYV=}phNtpcm%HC;coAH4V| zz2R$V&%pxO(S6rbefHPf{CYviK)$(!mn>fmP8&pC_7bV+obP&g#02&mKKhYT3k&x^TO)TN*z zJiux|l8Gn|ta#-GPl%U_3eavXM!%|J^roJ|x$gW$aO z1#;8ExAE_};sBW~IE^;I5BMyqba+#2MdyVUY!yx- zJC(@bDM(l}c@W9A5YuQv+H(y6QCca<^H2nYR97Z5pP9+yc`?K73>zM>%`-UXoY*oht8a z&+DFMX()y~^e(WxTl1W_TBhozdzDZcmtGx8p$K(V^3yeCHXXH%%GG|Ch9kRX-vte~ zvFNrMtS+l*|Ec}mKa<`c9A8s-x_5YW`G@4fxRLP>y>~+P)gLUmK<9R&4ZSH{1DXk+ zpnDChUO4z}s9~u?SF&G2q$XnPfyGBnM0ItRk}g&F5aUQI87C7n^qN?zg6Z-;83?HBFtaM&Nd|G1*e}gmdT9T#K<7=mxY_P#_O1Clrz;z$E{oAr_Zsi+7XGHHx|uwj+nu@qK|`9uUN>m z#Vl22kA?KaXaOzN$z-3=l}f|7}r*t=4F`S}{ zec%w<_T7Pfb^fP4{amRn^ozY=PCV?%``8p<&71ONVE_@A_r7W@<$7 zU~*T1d4bhd7=yV3rX96eG_CE`e^&f0kmwIZMI z(6)=E;(`>~b6X)ybV5+&7E%`OAyoyp+t5a|N8I4*^O=sNj{xsovpr|0YLz6J(IW65 zQerv|kn4#3R0me5>TmYE2Vye#K~9sx4%dE%m9-u42&+nrkPX|ON}$m$JeK9xLq#NH zAvK{MvQc7}*9uf+#HKQDPyk!#u#5{2nOn-Y)d%DaiA3DeGrx1PFJtLaXU$|5#0t)Q z0&k81#>&jMFmSJ#nL7`&=4oJ{3s11)nI$A}qFQ(bS>Zh<;*AQB2x(ZLLV~B_7;TPYA{=64n6%Nl`UVSrRO3q(aFL zB=3^Ni{BAQIAZ@G$LZ_W5j~j8QzLkl#^TChzo|S3=PX|U&(f%a&3l& z4z)OYpw8aV(6g`hbwGWsX_3{cR1aC@Q+pR#wani-Y3#IN;L5<1;JA4W%%OyX11nxA zblVJ5eyndmR;bl`lioC}&_xk?tLdK!OINQrVQ$`8RW0khky|&~z4#LIkoDfM_R}rt zuiuA+w8y0%CdPNC{mnV7fP<;>R7S1Gi{`{R`~;7n$7ac2y@sQ4@ySM%=gJ3C-$(Rg z{&1yl|5|33Q)oM-`o|nAoP+4=Ulo+XpXVEi5oga=ha9xDSy0y1TJtt4c)CP?;Bnlu z(e`OI&GhH>flPTGo7$1rOsCO_^I#%M1LQM*g)DACXfWx*)o#ga!u#mRKzf(X#5%h| z4^oLbsMkV60lrYJg=z`u`}v&~0fY==#s3jW1a;Uy3W;Zc5wTc{l$Pubte0-*99p_P z{ylFwRBtmzJv@rCO((G$>Hsf*ueLBDerBN(?12%`1{itbEN0x#cidds65Y(4%PFRi zsYDn*!c)y5vcN&-ILLY~-6EfbjLwydq#iOSV^KFRXdBwqQCSlm%d$V2rw!&Pp{jHp z#A8@6OJ(ps=z+5Jv8Uyc#rUD9(ggFW6p_CcBcJXo~uGt13di%;wu3j zOyTD_`3!BTj<%S{y#y6Pie%gdH_7qyo;kwi4SvV(PDzu30C!M06Wl>vh43%_%cZJ`Fg{Fqlx8y*?*hg zsNb3td{=j?X_e<6Bg3tt!6a^YUD%@lbW(VfWQPP44_*At&TnbOr(iQtwKCg}5|i{s 
z_fg=Ul!WkjakJTJE5M6>U0;9y9SWOLRNKDq=uPs3!RciY5c!J172ST6l|@EZ%$Ikw zd_&Ki_N;H)kG7rGgjZWN=c65Dy0rC7SJ&O2P7U8}lw7}F6=~H@t?#ld@kq})B2Q%& zeQ#fKW-w__YUC9hBG_dXqFh=!te`Xm?uk9S{jPhL;P1*1zD`PWWVaRkiIK6*=I`6S zx?h%{zh$63uK9K^dbbjm?VlRga(efnaejQ?kmzjyG24t&w^W-$=@*R~gDeROUlZUN zVcbjPYXBGAGI5ANj;|E48L*Qed{YXQSzIdKDTRtuV0pzFK8)`!Pe79_-xw?%K^{x$ zPIGl;jZ}`p0TiwByK%?uM(pQ*R2dUu(GibmU*t}EuPOxKO4}16jJp(_{4GrsuT{eN zRY=u|q;d;R1;KBva@4V5ewgrH6BKYRi0}(7xQ&`nnF0%-N;G)3S$7J|*-}SZR^Jr+ zV;4IN)*CN+z!l8{ui?`nljh^Erl@k-ZH65*R$Bvi!YGlH=7^3{b4fWJv*VFR7_3}- zDx>{>4MVevFfx@mp|Hfx%~)ib1s{r#7XwPH5dr-*tA{QA2J#sVr@|8tDj7f|+{FZX zTaOJ(RmZ9F4EqIIz~UvACfvGyF`o{LE~<&g@(e7@QSXD!Lu@Lp&ZT1L&ISaVVS^Im zI^g?xF_0P`1q@sXl?jmWh>Z-W)1R>A1bIz_fq+e^I2rjK*i1}S3By8^?jZ5S5SAe1 z3AN0OSeY#u%ow)Rqbc8O9ZNPjS0lqwoe|5@3Yoid)_e_?L;JfM2hg{>Xs=?=HjkK* zmK`zoniJr(K)+H^OmEeW2LG-VW(fHbtLg*T2DKgysQeaPuMDu~&sIj>`LU|j*xcml z@%aBw{Gj}P94uKCxmr`m)xR7F)*UDrdm#`iv^DK)x( z&i`q)=ypm{$y~FPq&IWdcxE36uki~Vns@5S$$-$Nr^b+lD$Z4b_-<1;?6_K#_PT|| zhFLB0PnZ%9M%sAxK91|W-4B;j!S|`FdhhS$T?;6YaPpe=0gKXIH+V?cD%c1Cgrfo1 zf`pyFgMeN(A_?A6kS@R%4W3$l)VIt$rkSmrDPF9NNK-O5Z9$ z6=>Iy(IlcZZ-fF%H-I$n%4B6{NeN7#v08*d5~}Tq{DAGjbd2DbK?97TmC72_c_mnM z7IImOET2cJ%2dZ)Mif4kDf1Q1SkDw~%sz?apuu0io)-OO(9A{EyXK#1UYfV4b`cJl zfU8OPzq63~saUA4bh739ai^!fF-{s@xTJB#V-C+F zEb(tfP1<5Q<;xO~+pfsFsRKElwS2*VvcMliQ@I9ECT@O#`7c*P!bS#@U6Y+`+zAi$ zPPg$abN_SxGaN8~~|oS*(Wvh!nF@U><|HBJj(b0jlh zLHe5Sj1`UfpE1cor$9G+)s2^(HF}|qha&3StnM{i=0&BuNv8j~`rOiXrnyyUTa@uy zW}D7kcF|tn9brASFo!qmb^R!kv%i~5=Q%`pwkm9HJXAQq%F%_O-yf(s{%$nn{(MDF zLQ!@a!h9>;NN$l=*CXfC+2SR%->J-44C#kfK&7<>;p_|phQP&d$^@)7rd|J;t8Srx zY{bqX8W5%42(X7jYtj-@r*i}+u$(2_c>5rhlQLHeN=SCBkQd(ys}iYqLVl<0k~?an zGDhMxiPNM<05}?B6ye?`$2DmjcV?VWd%h0j^joOi$DL{F>vAb4H#n=a87DX!CCg3u ztDD;1xWaq^BxbDEwAj{}!BdyGaa4CdZawcDFgZ&jjuXQa z(3s1qoS&KbzFI^;#bQvaJ~EAC_P0InS%A#GDYF&vo*2g6kCXmxypG>W0ho#VL+!b< z_dWRRi0c9NI}SEC9-P)<_rQGlD$!XB(LC{XqM~ZZk5ggEqiLw|K;28lJBa$jV1g;p zT2Y^4kkEZng5qKBNxxo(9P@&9sYgL%nW)G=z|*zFxvndkud!=3V;%)6ZtWTkSKIF{ zOG&*^?Z%YsXp3n8LiyZ!ycsO5ViVP=Z}!a@H1(v?WyuQ23#o71g}j;Y+yw#e*XHE;-~c2A?dRH2hD#<^F4UO%lw4f z(vMY@V|}&5HP7F@yYb5H>-kB;HI0SlWrc}nW5-?(^AC3?sMd4UwZX~v4;EBZhbblb zm4TGHu+)KOGmq3Nxtbw=^3MskOs54T%|8}@@%Oh62F;XFT*db*AF}yui@a)7fBVy& zyRt0geYoaj7I^j74KnGxF@zje&^qE)lE= zJ~RhM{+76NTR{lH4<)%&E>ssJ`~Z+F+jsTP&F?6DyF65aresY;NSyVqKpW?X4 zTb2R=(@iAfRc@T(k5doq;{b7TxBuV5Nh5N!`6@lo6KCgAtgT`t543e zpGSk9BiXF5g|p!}+H*c^8IGGtQEJLc%A>)0k|B zx_TKZY6dM@4M)#V!8whzY17v89vCt4Jv^?**JUt78e)7+AdSRD)|A2c4NPe>CR{W4 zwxW)cwjf6`h+eMV+7fqK`@02U`8V%>3axxua-{wpxE*h$Dxik7Ml@(1PDy!ToI)1? 
zLBuaU5Whs}5#Lm8Yz}LIc7aE@RYZSH;{i+m7D(R+ww^1VzdHP=d@Nzx{qn(<=9>%X z@PL|Lxw$~|&^@JTcG)8g9sIuitB(XU)UNUjT(!He#?Mr?sq4U<67RLHg%7_t$!W_oXPl@Gnn|^jIPQkvGRxQE%j2}^&VfIODl%I$ z7zT`8;zcXZ$klU&L9o=rrJ^wbU<7%E=$S`=7Sp61xvX{~QgAXOaN#UWyuSQ#(aQNn z*b!n>+G*^zCNX^xTPCJ)7y$kNLL61el}*ezq-qP*StU?_0*;7iD6K*G&T~=&_cP{7 zq4k;3-V?!*#zYwuY;I!d182gdkEdju4#?F%a~CeUqYpW}XJV2y#5B@O2}6d;%&!vb zu&Gh;2 z8axglxRAtXHtHDwYIt8YYgFjUYn=ea;hq<$Apyq1cbK!9~`H=mya!FudXFdQ6u)WOAvcE4d+C>_7I{&7pAAI@G zq~)W1FDe!H*61&$#FzXLIz({9q40!otQo)XUsNn_lLO{zdB`3W$lm)NJUjU1A*)7q zyQNvD(zMX@k4URmhFH#6n~2pbp8xQ>;*`~%3jg%0uM}|;w{ZTNf)9_L_21fy=zMLguSfylQpY!I zIGB)9Q&nr2+s=NDmF?#dbJTbaCR*@yH`no2Tnt#k*XzA_F17M30(w9w#QvvCoF4Lw zSO2zM#1#YIA@jl$}$V0|;?DksxFJXf5(TR8VhT>3Bd;q?JKvfp^*LYc^G zwV1|cUcMYO%UDBoj^h-fvTT;~zICQgVrf7wC-oR)X7t&j?rnzEeMVb~T1Wg*_$jC0 z{hxI?&p_=s<2Q)GG>QdXbm5CF9_sPy?SsgvcQQL$&Mwq7%#DOWP_EIE#sEvp5{Q?G z-EX6V5KN6~3^|R1)0e4k>s@%!b{CC3vduDo$+!D4iB`K%4AIdJ#B^3RpS};^7V#E^ z*Q`EH$p!P1SZSeKhUD8C9GdU6j+6>={QA$s;7+XY9a%!2+nrX}N4X6Szi%61JG4Ot zVYRD7Pvc%1TK(lb><2NJ9^>A>;Thbb=vX5T)&L2RsN6zO=E2Z6*Y1>X&g0KeN6#j7 zZ~>JQ4QjbqEr0P82?JB7^Y7pQA{(^pzs4zzWZ5tJRGl+Ho}@I_n3$Z~-5oz>=4n3s2791C0POP9b*tN={$!tI zhaM~cmhQigR+80f*WW%%$4Xd9K;4!M_UQHA+IR?8KRox=ZY@eCN9{ z{``_{({zh#`>J2y^vmMqZ7CAR2!j^A9xco8A?j_d^V76K?ZFeGiC!9u?KPT_Ie|V+KE<%Fbw2XMlp6-7{e5uBU^$U zzm`$YGXq3&b7&(1f)GF*<^za9i&XWG#d}cP7lv zcqp0QMxV-z25VDG?0y#F(psk2z4lv?A|MlM z4?O%lQAqIW3I0ZPnS9|a-AFhL#aMwwd7$oG75A>M>)}(ol%>{dK33C2!^53(PqHZ?)5h<^dF8nT|`3gn3rVQu;Ms(_t=p6ZksTw0m$-`$ zKZ?XKyvLBBZgfaX&%*7Zi;xL5~6DdoOthqBmfHx#x;uG+6ecH>UE9 zX6J1^^I?YbQ9+g-f;G6lSdjvI`$rNBB**lR(0+Ytt_1+I%Z1c0` zQ7gW2CnvyPYeF9B?%eqB+gZN`>+Cq&?iq4=_0zVW87G`pyPTzqnjBVE$Qrf$BH`j` zL)$=CHd=$n!utlyI?Fr*9s9HS=5g%2`i&d&U7d*q4ceuzh0sW7swJ~700f?(gPmo8>#lpxe zdrzO{bPtQ#s6F7CwzYojT+MqM%jGcTUt#4Or$JeX|CYv%*BycIZ;h%F&2@AA1GF}8 z%r%Kx#L5%ke3+hXa3e6H-EV6Ns5rkVua!ucB_8(-)$93a=%-^)0-gKFF_L>d7THG- zE$!qlj6tVXwe|>4Bf>p#8X5?+rCx_A)IxP`J(+!=ZoL%Smi!fDm-6*oM5qblxv_qQ zqim^8+IgYVJ?4!$D9n*wV|HEygEd0NVwmiJ#d%!oIjU-+=*kG1mo>#SEjHN=#X5h< zz7bYs#O9iyj*QN*3l=;%^JlhXBNv<}6Q1T%dG8r0>}}@Cjo9VRIXkoRxkV>fn2d~y zhpLSR4FD>e-K^t`(i3Ra=QY6>v7X`TiB4#t5%b*fAEM_7GMNG{b2)>F>re_S(R!yE zydVUDpbnY#h;ZVTZ=|Ht2lgd0mS#H|sM(lDlCx7rIYhyy3{L(xc=Rx4Z0ThdQWs05 z75_)!EC4NJ_V{F|TZQ*sC zrER{|DcD=E_{7$B)dgK@`-MoFwJ9HiWIL`NtD7_{H?4S8qE>BjdF}7>{F93Qn$4}< zSKB|UJgsQfy7VPF>2v4m?yK3c%{MZ5KIq7Vhk9S%;PPj~@j+qRD7+wUJwhWK3fkgt zBv7q6TBOIR__+ofxq&gxt+;308`P0?1a=i)jR-HoD7+}tEV!b90C+^96w`uuEx&U_ zRJSd-P0Y{oT~S_*1e5#`FtDBN+CEzF3r8 zvD5u{2z=`FZyCSE!4!x@&a;WPUt@(GIHm@yMoE^sSG;%F@ulsIxfsdl(&e(^5!<&L$c1K>ghTQNWS(+H5JYd-<8N7BC*<>IY zUlcB!t)H6Hx@q`oWXj0knmn(t8V}i4NlLKI7SF-_wFN2tjOus$-w&mlYgIIItSbXS z=-)+E22Ke{zfusQQah!yf63m-S=kM^bl@!H&T^SYZqaw#I=*h#54tHIJ^AqcXK{*B zPL;5J->^>3IB9>PjI8@^v)mm&+ZXlf@)qxIaX-64xohFN27#Z`#b%AJ*=R&2Ey=6> z=5gNiVtQLUuue0Z{skMN5QpeE&}z)=V1@KG;Ql5!(}6m!7(@MqsI2(C z8X}xlOhOVuij{|85^R+bX=k)vjfCM5RN%yFib_I)Nl17|@Kx;y3RDUfVnA%CQy=wp zi?eiU9qRPX-0$E3i^a+Ta?bzn$G7+X_WmK1C^T!);D~vqgH(%!VhN8gt+5Jhl-eTS zv_nA&s@NguknFt83-`y}yQAZ4FqWhf>Sj*SqSuq6txk*cZJFby_N8;_*@?cI2az2c zGy2{IGz1Oy3>*W}SP0iZ4@J}QpEjfIIf?m_5^?h*;Hvg~98KAK5_aTde#_SQdQtRE zO=jnW<0fc>$Lhhq$mmmHwNT1b7?YOgpDgG^)8?s*)p$ONMgl$U=Gce;0&SSQeyYW^ zuF`W7d0vfu-VJ27wEb`STkpJ9{H*b!=*u6jeKPgK%C6x5{h!mj&KzGl`LyeoH}7E@ z@{YFn8z*jTeR?@D{XznJ^Y`-2bsH+=r}#+l2V<0Xpa zjYz@@R4FO56UV;<%ROuc`V9K1@j5zrvkQuRl8;*&5iU=KD7x z;;ON-qSUo32Cr-tHFpf&{W?5x?urq=E8pxKiVxcsUhv@?*S_sPa-4mk7^4EW&C~GC zAs%lJR5;~YuhWAv3^=&R6M;tjS!Sn_JKWS{^v&2yyFKoOx>1xv)!q<*UC zLg=(9u&}mNyn>d)^=T~_1;$Ud2OWGElRT_-`GDUcyih5~VOt${UI;`ml-wu~8_Hy= 
zPY5s=p;wr-fHw|nrII)Z7cqUi?aOM94=fxlqGKm&3vrxos*8{-bw)l61bSQKi&la} z7-H`Yvv-$U`omuk`n7y+Xz?SE1UTK?v_2mC4UQn=q;OjAoNI=nf4D8Nssd+q=I31) z@2u)O|5t4$MKq;KP)^KLfxuD>gC6Srp(CI_D(K-lfT3*Ef@R8sSKj%64pEw-Z1-AJ zRv~Is_wz;pG%{X+v^^7UhGKleKxn95$T5)oAm|8gN3aOlN6@>c9Rqi+8;hYD0f~n zzBB;O!K|v)qnRgCj&5@bAxx9s0WAR5@OYFh;f5M{*L%@)M}4^qDbF{}g<=7yqk9RN zeQc2tCYV5_)(nPMw`Mvg0^DHD_qauKy>0}7RdRDEiD5v)sm^KdiKGe_WiMYo#BRZh zjG?iq>z!G5lW;;c*_u{_d?tbNP@MOP-(-NFQt0tE~=#*?9J3Yv_0n*Bv|m)rai`(jb3&^+TR5mB8H9#5HG9$#6h zLtNZ-zE(7MyjTt$;Yx~c*@{}_%V{>w83jp5^+L;-W7SbxQQn1Li-U>o1Rg3Z(YMtm zMBS7UAX0v*3e)^;{bm%gsFf8^!8F9n1nM$e3K@M1vMEA}$H>F*Eed&P`{zjA3<{TF z4?wE{Q4Pccz6uK0Sq)|o9Xkgq;IxoV+jmfYv^RRdZkB=}N;SVj2_V!$ZYbJ<=Q)rF zG#>ht_364xcl&>kl`G7Bxp0-Kb23Q95KbhOZBk>Qv`8O zWQjEFQ$>?gL62EiZkNHRAy|^V{DVz%{v)E;ov1}PEPc^zcuCP3I;OfgpzLz|{DOrN zrCuc?L`$S7<-zIB100aU0>Y30dFD~B^QTQfW}MDoF5xR^iRKG6940+RG)k|^5`*b& z#AuULrxvyg60PKjf~H9Amvj&c%yyo;kS9GA0vb8sL`0K^;Nz1`hg%Ms8Ya}kxL4P; zwVoB`XXl#N6cU&w^0k+J}ckNz}N|gA}(boEs&XByDyZNJbR^5(NtaR1rcq%3Jj7XH1i;A)S=Xk78gm zO!5R8npQ&HtdjRec>v(JfH221&9T8_LA9w`hgN1yfngXHQY0I`s-YTE_T9a;Uv60V zNy+CcXE!8$m;JAXA>^Xr6@v(^&R<6yW(b`g|Jc2^ot-)LZAL-Z>K!Diay^|}o zjCds_wU(8kr^;`4B}4_no3M#7>^6yP(0ZQFi~GsOQST~zYP%GNTlCY5FKY)07n6pt z;v|xK4#I=v0Rw7Vw-1tOjQ&fj!VKq8g$Gf+6xWO)pUNuo8tnrbqz7R=F~dfR6rwR& zhU7V2Ze=rS2oHzkrd_frdl~wIWCR;E3KDeQ*YhzcudUu#5K&30JfJiorixg6URTj% z#?5wC%iNlIqe5RwD&e=-Z>Gs2SJVZ?(genGxUnFsU2V4POEnZp%kReei+WsnD@B&I zand%M>BKU_NaeSqT4P?Q?JZW+Q-)h=G5%@s9+oEnr(jk}eX(*Yh^J1gH7?!+^3U3~8He5{2kqM9^L${ObbCzcUHWGAFBcT^~e6-P(am{32P#HLP8%iMkh3WraGO-z!n zF`a(_uK+e;2|pofgPP7_6Wpoka0LW$ilsZf4XX8p}b5Q8v(oK6EIe(y>Sf?y)FFhRJWH z9SWD;w<8NxXlLJo*7Qrr=c2(YM6-cLWNk|2Zkm-(CWRFEPa4%^tI(z#NEf@9szOqSv_XRVWanvJ0!b+t+S2nB-BdwE3mP>f6keToMZ=>pobX>N3m%NR zq@aT%Bm@D}u99}ZLQKExgriw6Ku*{$R9pHvAvD7qBu8wi5n4n%Ce6UmGxRKjR~F5t ze-%{8XLCev`aX)A9M}wPq6pkmpW9SjjZL!0&B{BUJ5+`@`W74@*`h5i-O^Z92roz{ zxxO&WGR`pELoo+q;ELQu?)$BYPj2Kb4{ZB=Xd`6v-+t9vMb#!6{ila!$EzPqSG=3V z-kE#7BR6#WQhlVLqB=a;Ta%sa+Lt|}sZ%F53-ol3Xt8>JvQKp6`_#(sn96edd6HsK z!H1aLpHi^Wv8qjN+J^b4;m!twXKBF-+UDS7jatj9vQupf^TnR^0y%IG<(nxs0hR;= zR}0uCFAJdXa8$p!k^mw_ZXEBB!Je(FsHU;L;Fvf}^z^oQx&xW0y04d8Qv|0lY)4Ej zCenM50%F~rF_0FK>+$IHizlVA_&ywb_wN@EpIFq;DXjsVf&A}3MOg%BO1Jj9CSL(q zD_VdvQC?jg*8cjc$FDDsDCsX?`9^{c{*&f^V{f#dj1>z{69UB|RUz0Vu{oVXtp-~V zdx*Q(M5Rd-7^(fFkb8#@A`NaEN{TELD`Fwu-8eQg_M{#G~&>_xkZS(0r+MyA7x3q~bP>#@YRwQKV8{VO0tAJp}-A zx-ypQIm;n*Ozn56nKe(e^xb$w4gFZT6PLw~n<~SuII)XFS9EbIA>7Iv zml`i@pBzp#r&b7wF5NIP7v*Lv%mvrbnlT2rJa_?f3dl0u?p>nG)H|2jG;vpf%PFP~gy-o>oM z81L>a9r9hY-LL0GEt-|5{U+Bjp(1%kI@Otv@kN^VN1O&63Iud5ZQibE;Vg6JkTgoyhE&h`zmRW;I3% zEzdJ`PqYGY@g5~D0vy;KQ7znqss!%!1ZQ!#rk%!xC1|x;`rmf>D)dtuLAE?f{Hb4F zeeBjo#FZRnP{qqB4&61n`DK4Dq<*OXyIO(rgDo{a8As|9d ztEj%lP8g#WJ@;f^?k!^+x>%jrTSgb$FVCBm4`QI9RL&C{a_zu&jt?$fTlKd>(WANd zZAG=b{v;x%^!D+uq%8cBcyQ&ch@8^v)dg#|t-Q6dtf(%MWi5WMy{G9c`iG}x=#bpd z6TvgpA~v0jV&FFAn*X@$>}jOR4j-6d8Rd{q#EtYI-RPbKOaC0fp=o%-kp_2oGJ27l zgOWf6v5szH!yPZO52*L7=4D>qsB48(B{AFgcXU>ly`=#3Ddl+x@>hf%;X0QrcBHAw zcCyFLgIz*H-pSrEj!?~t(f}Fi2`a>s@I2Oaw^N;BimwK)Fcll)5<<1b_CNo(wo<yVb<4ktr2<`6XU#VtQIXjBq?CitXx<{2zb`zje5iG2>r>; zlkuAZCZcamGLr(3Bb_tBkwo*$0@JsvG8#I_Cl__cgF8CQkLeFIb_v$}k#qL$V8E*9 zoZ;PYU~B>-Ao1{X1cqz;2TM5k-T&&9?8-f{K(^(sD)>+Nt;`$OWoYukmuecpyfe)S z1dst#gBOY8VLJj80E_exsa~LGtttGS!Sb8tox7^#{o(_Sq&C(oIohM%UC7vOY|U=h zn>WztQydX{emWwQ7EB5g`Kcp_H)=(;kxnyo6{#k z6X%g_32Bm+@GXyBg#~FXst*z(Kc2nE)SAeOy_q_7k3;fe^2K&GJhM<8UO5RW2GY(k z1-Am#fv*J3o4MDI6_q=F)X$HIDLAe6G+;Z80L z^Q2!cQi}qBrylK4oBkFkj2BrJ{zIr%4s0E92;y#vy6q{EF`v$nTQu*yA2YLqQ4{2` zNOlFT%P2hF`7H*JM9bS|3Ygm=f 
z?)x~|H=~~`u(9HZ#E7?s${5?PXMdGeyP&zG{OdQ-{>#HyYR)y!>8gu~M-qchf1K+p zy*c^%l`ZW265kn9MV!aH>fvzM$+9m3GOjk3yAqMk=Xp31P7Z60<~!Qd_0w~nwr~BN z;qkt-B>&Po8NI*l3GO&S|3~oMoF9X{Nda%hua>-(IJtpa{j}G>3TRr#2dP&veGa1o_@6WpKneK-j{P)fO2fDia761SM diff --git a/resources/output_rosbag.png b/resources/output_rosbag.png deleted file mode 100644 index 1ab5eab..0000000 --- a/resources/output_rosbag.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:6a144de04d5f8962819ff70e983fce67150e94191ddd3a28d164a6a3ff19dfee -size 386195 diff --git a/resources/realsense_example.png b/resources/realsense_example.png deleted file mode 100644 index 8d0b870..0000000 --- a/resources/realsense_example.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:544b919776f5c8d8aef80dbdab7eddcb1530eb4131467e5aa1de592e7631e06d -size 1114264 diff --git a/resources/warehouse.gif b/resources/warehouse.gif deleted file mode 100644 index 3af646a..0000000 --- a/resources/warehouse.gif +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8aded79f169309471f7a71785111aa4c8747c0b21912d802ed099db3bf3857d1 -size 2050117