Marcos Luciano
2023-06-05 18:33:03 -03:00
parent 79d4a0a8cd
commit 9fd80c5248
25 changed files with 108 additions and 41 deletions

View File

@@ -18,6 +18,7 @@ cluster-mode=2
 maintain-aspect-ratio=0
 symmetric-padding=1
 #force-implicit-batch-dim=1
+#workspace-size=1000
 parse-bbox-func-name=NvDsInferParseYolo
 #parse-bbox-func-name=NvDsInferParseYoloCuda
 custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so
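
**NOTE**: the new `workspace-size` key is added commented out in every bundled infer config. Below is a minimal sketch of enabling it in one of these files, assuming the value is interpreted in MiB (as in the plugin change later in this commit) and that the surrounding keys stay as in the hunk above; `[property]` is the standard Gst-nvinfer section header:
```
[property]
...
#force-implicit-batch-dim=1
# uncomment to cap the TensorRT builder workspace (value in MiB)
workspace-size=1000
parse-bbox-func-name=NvDsInferParseYolo
custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so
```
On TensorRT < 8, DeepStream passes no `IBuilderConfig` to the custom engine builder, so the plugin-side handling added in `yolo.cpp` later in this commit is what makes the key take effect there.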

View File

@@ -16,6 +16,7 @@ network-type=0
 cluster-mode=2
 maintain-aspect-ratio=0
 #force-implicit-batch-dim=1
+#workspace-size=1000
 parse-bbox-func-name=NvDsInferParseYoloE
 #parse-bbox-func-name=NvDsInferParseYoloECuda
 custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

View File

@@ -17,6 +17,7 @@ network-type=0
 cluster-mode=2
 maintain-aspect-ratio=0
 #force-implicit-batch-dim=1
+#workspace-size=1000
 parse-bbox-func-name=NvDsInferParseYoloE
 #parse-bbox-func-name=NvDsInferParseYoloECuda
 custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

View File

@@ -16,6 +16,7 @@ network-type=0
 cluster-mode=2
 maintain-aspect-ratio=0
 #force-implicit-batch-dim=1
+#workspace-size=1000
 parse-bbox-func-name=NvDsInferParseYoloE
 #parse-bbox-func-name=NvDsInferParseYoloECuda
 custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

View File

@@ -17,6 +17,7 @@ network-type=0
 cluster-mode=2
 maintain-aspect-ratio=0
 #force-implicit-batch-dim=1
+#workspace-size=1000
 parse-bbox-func-name=NvDsInferParseYolo
 #parse-bbox-func-name=NvDsInferParseYoloCuda
 custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

View File

@@ -17,6 +17,7 @@ cluster-mode=2
 maintain-aspect-ratio=1
 symmetric-padding=1
 #force-implicit-batch-dim=1
+#workspace-size=1000
 parse-bbox-func-name=NvDsInferParseYolo
 #parse-bbox-func-name=NvDsInferParseYoloCuda
 custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

View File

@@ -17,6 +17,7 @@ cluster-mode=2
 maintain-aspect-ratio=1
 symmetric-padding=1
 #force-implicit-batch-dim=1
+#workspace-size=1000
 parse-bbox-func-name=NvDsInferParseYolo
 #parse-bbox-func-name=NvDsInferParseYoloCuda
 custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

View File

@@ -17,6 +17,7 @@ cluster-mode=2
 maintain-aspect-ratio=1
 symmetric-padding=1
 #force-implicit-batch-dim=1
+#workspace-size=1000
 parse-bbox-func-name=NvDsInferParseYolo
 #parse-bbox-func-name=NvDsInferParseYoloCuda
 custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

View File

@@ -17,6 +17,7 @@ cluster-mode=2
 maintain-aspect-ratio=1
 symmetric-padding=1
 #force-implicit-batch-dim=1
+#workspace-size=1000
 parse-bbox-func-name=NvDsInferParseYolo
 #parse-bbox-func-name=NvDsInferParseYoloCuda
 custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

View File

@@ -17,6 +17,7 @@ cluster-mode=2
 maintain-aspect-ratio=1
 symmetric-padding=0
 #force-implicit-batch-dim=1
+#workspace-size=1000
 parse-bbox-func-name=NvDsInferParseYoloE
 #parse-bbox-func-name=NvDsInferParseYoloECuda
 custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

View File

@@ -17,6 +17,7 @@ cluster-mode=2
 maintain-aspect-ratio=1
 symmetric-padding=1
 #force-implicit-batch-dim=1
+#workspace-size=1000
 parse-bbox-func-name=NvDsInferParseYolo
 #parse-bbox-func-name=NvDsInferParseYoloCuda
 custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

View File

@@ -17,6 +17,7 @@ cluster-mode=2
 maintain-aspect-ratio=1
 symmetric-padding=0
 #force-implicit-batch-dim=1
+#workspace-size=1000
 parse-bbox-func-name=NvDsInferParseYolo
 #parse-bbox-func-name=NvDsInferParseYoloCuda
 custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

View File

@@ -18,6 +18,7 @@ cluster-mode=2
 maintain-aspect-ratio=1
 symmetric-padding=0
 #force-implicit-batch-dim=1
+#workspace-size=1000
 parse-bbox-func-name=NvDsInferParseYolo
 #parse-bbox-func-name=NvDsInferParseYoloCuda
 custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so

View File

@@ -40,16 +40,16 @@ wget https://idstcv.oss-cn-zhangjiakou.aliyuncs.com/DAMO-YOLO/release_model/clea
 Generate the ONNX model file (example for DAMO-YOLO-S*)
 ```
-python3 export_damoyolo.py -w damoyolo_tinynasL25_S_477.pth -c configs/damoyolo_tinynasL25_S.py --simplify --dynamic
+python3 export_damoyolo.py -w damoyolo_tinynasL25_S_477.pth -c configs/damoyolo_tinynasL25_S.py --dynamic
 ```
-**NOTE**: To simplify the ONNX model
+**NOTE**: To simplify the ONNX model (DeepStream >= 6)
 ```
 --simplify
 ```
-**NOTE**: To use dynamic batch-size
+**NOTE**: To use dynamic batch-size (DeepStream >= 6)
 ```
 --dynamic

@@ -61,7 +61,7 @@ python3 export_damoyolo.py -w damoyolo_tinynasL25_S_477.pth -c configs/damoyolo_
 --batch 4
 ```
-**NOTE**: If you are using DeepStream 5.1, use opset 11 or lower.
+**NOTE**: If you are using DeepStream 5.1, remove the `--dynamic` arg and use opset 11 or lower. The default opset is 11.
 ```
 --opset 11

View File

@@ -38,16 +38,16 @@ Generate the ONNX model file (example for PP-YOLOE+_s)
 ```
 pip3 install onnx onnxsim onnxruntime
-python3 export_ppyoloe.py -w ppyoloe_plus_crn_s_80e_coco.pdparams -c configs/ppyoloe/ppyoloe_plus_crn_s_80e_coco.yml --simplify --dynamic
+python3 export_ppyoloe.py -w ppyoloe_plus_crn_s_80e_coco.pdparams -c configs/ppyoloe/ppyoloe_plus_crn_s_80e_coco.yml --dynamic
 ```
-**NOTE**: To simplify the ONNX model
+**NOTE**: To simplify the ONNX model (DeepStream >= 6)
 ```
 --simplify
 ```
-**NOTE**: To use dynamic batch-size
+**NOTE**: To use dynamic batch-size (DeepStream >= 6)
 ```
 --dynamic

@@ -59,7 +59,7 @@ python3 export_ppyoloe.py -w ppyoloe_plus_crn_s_80e_coco.pdparams -c configs/ppy
 --batch 4
 ```
-**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 11.
+**NOTE**: If you are using DeepStream 5.1, remove the `--dynamic` arg and use opset 12 or lower. The default opset is 11.
 ```
 --opset 12

View File

@@ -43,16 +43,16 @@ wget https://sghub.deci.ai/models/yolo_nas_s_coco.pth
 Generate the ONNX model file (example for YOLO-NAS S)
 ```
-python3 export_yolonas.py -m yolo_nas_s -w yolo_nas_s_coco.pth --simplify --dynamic
+python3 export_yolonas.py -m yolo_nas_s -w yolo_nas_s_coco.pth --dynamic
 ```
-**NOTE**: To simplify the ONNX model
+**NOTE**: To simplify the ONNX model (DeepStream >= 6)
 ```
 --simplify
 ```
-**NOTE**: To use dynamic batch-size
+**NOTE**: To use dynamic batch-size (DeepStream >= 6)
 ```
 --dynamic

@@ -64,7 +64,7 @@ python3 export_yolonas.py -m yolo_nas_s -w yolo_nas_s_coco.pth --simplify --dyna
 --batch 4
 ```
-**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 14.
+**NOTE**: If you are using DeepStream 5.1, remove the `--dynamic` arg and use opset 12 or lower. The default opset is 14.
 ```
 --opset 12

View File

@@ -44,7 +44,7 @@ Generate the ONNX model file
 Example for YOLOR-CSP
 ```
-python3 export_yolor.py -w yolor_csp.pt -c cfg/yolor_csp.cfg --simplify --dynamic
+python3 export_yolor.py -w yolor_csp.pt -c cfg/yolor_csp.cfg --dynamic
 ```
 - Paper branch

@@ -52,16 +52,16 @@ Generate the ONNX model file
 Example for YOLOR-P6
 ```
-python3 export_yolor.py -w yolor-p6.pt --simplify --dynamic
+python3 export_yolor.py -w yolor-p6.pt --dynamic
 ```
-**NOTE**: To simplify the ONNX model
+**NOTE**: To simplify the ONNX model (DeepStream >= 6)
 ```
 --simplify
 ```
-**NOTE**: To use dynamic batch-size
+**NOTE**: To use dynamic batch-size (DeepStream >= 6)
 ```
 --dynamic

@@ -73,7 +73,7 @@ Generate the ONNX model file
 --batch 4
 ```
-**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 12.
+**NOTE**: If you are using DeepStream 5.1, remove the `--dynamic` arg and use opset 12 or lower. The default opset is 12.
 ```
 --opset 12

View File

@@ -43,16 +43,16 @@ wget https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yo
 Generate the ONNX model file (example for YOLOX-s)
 ```
-python3 export_yolox.py -w yolox_s.pth -c exps/default/yolox_s.py --simplify --dynamic
+python3 export_yolox.py -w yolox_s.pth -c exps/default/yolox_s.py --dynamic
 ```
-**NOTE**: To simplify the ONNX model
+**NOTE**: To simplify the ONNX model (DeepStream >= 6)
 ```
 --simplify
 ```
-**NOTE**: To use dynamic batch-size
+**NOTE**: To use dynamic batch-size (DeepStream >= 6)
 ```
 --dynamic

@@ -64,7 +64,7 @@ python3 export_yolox.py -w yolox_s.pth -c exps/default/yolox_s.py --simplify --d
 --batch 4
 ```
-**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 11.
+**NOTE**: If you are using DeepStream 5.1, remove the `--dynamic` arg and use opset 12 or lower. The default opset is 11.
 ```
 --opset 12

View File

@@ -44,16 +44,16 @@ wget https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt
 Generate the ONNX model file (example for YOLOv5s)
 ```
-python3 export_yoloV5.py -w yolov5s.pt --simplify --dynamic
+python3 export_yoloV5.py -w yolov5s.pt --dynamic
 ```
-**NOTE**: To simplify the ONNX model
+**NOTE**: To simplify the ONNX model (DeepStream >= 6)
 ```
 --simplify
 ```
-**NOTE**: To use dynamic batch-size
+**NOTE**: To use dynamic batch-size (DeepStream >= 6)
 ```
 --dynamic

@@ -65,7 +65,7 @@ python3 export_yoloV5.py -w yolov5s.pt --simplify --dynamic
 --batch 4
 ```
-**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 17.
+**NOTE**: If you are using DeepStream 5.1, remove the `--dynamic` arg and use opset 12 or lower. The default opset is 17.
 ```
 --opset 12

View File

@@ -44,16 +44,16 @@ wget https://github.com/meituan/YOLOv6/releases/download/0.4.0/yolov6s.pt
 Generate the ONNX model file (example for YOLOv6-S 4.0)
 ```
-python3 export_yoloV6.py -w yolov6s.pt --simplify --dynamic
+python3 export_yoloV6.py -w yolov6s.pt --dynamic
 ```
-**NOTE**: To simplify the ONNX model
+**NOTE**: To simplify the ONNX model (DeepStream >= 6)
 ```
 --simplify
 ```
-**NOTE**: To use dynamic batch-size
+**NOTE**: To use dynamic batch-size (DeepStream >= 6)
 ```
 --dynamic

@@ -65,7 +65,7 @@ python3 export_yoloV6.py -w yolov6s.pt --simplify --dynamic
 --batch 4
 ```
-**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 13.
+**NOTE**: If you are using DeepStream 5.1, remove the `--dynamic` arg and use opset 12 or lower. The default opset is 13.
 ```
 --opset 12

View File

@@ -46,16 +46,16 @@ wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt
 Generate the ONNX model file (example for YOLOv7)
 ```
-python3 export_yoloV7.py -w yolov7.pt --simplify --dynamic
+python3 export_yoloV7.py -w yolov7.pt --dynamic
 ```
-**NOTE**: To simplify the ONNX model
+**NOTE**: To simplify the ONNX model (DeepStream >= 6)
 ```
 --simplify
 ```
-**NOTE**: To use dynamic batch-size
+**NOTE**: To use dynamic batch-size (DeepStream >= 6)
 ```
 --dynamic

@@ -67,7 +67,7 @@ python3 export_yoloV7.py -w yolov7.pt --simplify --dynamic
 --batch 4
 ```
-**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 12.
+**NOTE**: If you are using DeepStream 5.1, remove the `--dynamic` arg and use opset 12 or lower. The default opset is 12.
 ```
 --opset 12

View File

@@ -43,16 +43,16 @@ wget https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s.pt
 Generate the ONNX model file (example for YOLOv8s)
 ```
-python3 export_yoloV8.py -w yolov8s.pt --simplify --dynamic
+python3 export_yoloV8.py -w yolov8s.pt --dynamic
 ```
-**NOTE**: To simplify the ONNX model
+**NOTE**: To simplify the ONNX model (DeepStream >= 6)
 ```
 --simplify
 ```
-**NOTE**: To use dynamic batch-size
+**NOTE**: To use dynamic batch-size (DeepStream >= 6)
 ```
 --dynamic

@@ -64,7 +64,7 @@ python3 export_yoloV8.py -w yolov8s.pt --simplify --dynamic
 --batch 4
 ```
-**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 16.
+**NOTE**: If you are using DeepStream 5.1, remove the `--dynamic` arg and use opset 12 or lower. The default opset is 16.
 ```
 --opset 12

View File

@@ -62,6 +62,7 @@ getYoloNetworkInfo(NetworkInfo& networkInfo, const NvDsInferContextInitParams* i
   networkInfo.clusterMode = initParams->clusterMode;
   networkInfo.scaleFactor = initParams->networkScaleFactor;
   networkInfo.offsets = initParams->offsets;
+  networkInfo.workspaceSize = initParams->workspaceSize;

   if (initParams->networkMode == NvDsInferNetworkMode_FP32)
     networkInfo.networkMode = "FP32";

@@ -101,6 +102,8 @@ NvDsInferCreateModelParser(const NvDsInferContextInitParams* initParams)
   return new Yolo(networkInfo);
 }

 #else

+#if NV_TENSORRT_MAJOR >= 8
 extern "C" bool
 NvDsInferYoloCudaEngineGet(nvinfer1::IBuilder* const builder, nvinfer1::IBuilderConfig* const builderConfig,
     const NvDsInferContextInitParams* const initParams, nvinfer1::DataType dataType, nvinfer1::ICudaEngine*& cudaEngine);

@@ -108,13 +111,29 @@ NvDsInferYoloCudaEngineGet(nvinfer1::IBuilder* const builder, nvinfer1::IBuilder
 extern "C" bool
 NvDsInferYoloCudaEngineGet(nvinfer1::IBuilder* const builder, nvinfer1::IBuilderConfig* const builderConfig,
     const NvDsInferContextInitParams* const initParams, nvinfer1::DataType dataType, nvinfer1::ICudaEngine*& cudaEngine)
+#else
+extern "C" bool
+NvDsInferYoloCudaEngineGet(nvinfer1::IBuilder* const builder, const NvDsInferContextInitParams* const initParams,
+    nvinfer1::DataType dataType, nvinfer1::ICudaEngine*& cudaEngine);
+
+extern "C" bool
+NvDsInferYoloCudaEngineGet(nvinfer1::IBuilder* const builder, const NvDsInferContextInitParams* const initParams,
+    nvinfer1::DataType dataType, nvinfer1::ICudaEngine*& cudaEngine)
+#endif
 {
   NetworkInfo networkInfo;
   if (!getYoloNetworkInfo(networkInfo, initParams))
     return false;

   Yolo yolo(networkInfo);
+#if NV_TENSORRT_MAJOR >= 8
   cudaEngine = yolo.createEngine(builder, builderConfig);
+#else
+  cudaEngine = yolo.createEngine(builder);
+#endif

   if (cudaEngine == nullptr) {
     std::cerr << "Failed to build CUDA engine" << std::endl;
     return false;

View File

@@ -39,8 +39,8 @@ Yolo::Yolo(const NetworkInfo& networkInfo) : m_InputBlobName(networkInfo.inputBl
     m_ImplicitBatch(networkInfo.implicitBatch), m_Int8CalibPath(networkInfo.int8CalibPath),
     m_DeviceType(networkInfo.deviceType), m_NumDetectedClasses(networkInfo.numDetectedClasses),
     m_ClusterMode(networkInfo.clusterMode), m_NetworkMode(networkInfo.networkMode), m_ScaleFactor(networkInfo.scaleFactor),
-    m_Offsets(networkInfo.offsets), m_InputC(0), m_InputH(0), m_InputW(0), m_InputSize(0), m_NumClasses(0), m_LetterBox(0),
-    m_NewCoords(0), m_YoloCount(0)
+    m_Offsets(networkInfo.offsets), m_WorkspaceSize(networkInfo.workspaceSize), m_InputC(0), m_InputH(0), m_InputW(0),
+    m_InputSize(0), m_NumClasses(0), m_LetterBox(0), m_NewCoords(0), m_YoloCount(0)
 {
 }

@@ -50,10 +50,22 @@ Yolo::~Yolo()
 }

 nvinfer1::ICudaEngine*
+#if NV_TENSORRT_MAJOR >= 8
 Yolo::createEngine(nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config)
+#else
+Yolo::createEngine(nvinfer1::IBuilder* builder)
+#endif
 {
   assert(builder);

+#if NV_TENSORRT_MAJOR < 8
+  nvinfer1::IBuilderConfig* config = builder->createBuilderConfig();
+  if (m_WorkspaceSize > 0) {
+    config->setMaxWorkspaceSize((size_t) m_WorkspaceSize * 1024 * 1024);
+  }
+#endif
+
   nvinfer1::NetworkDefinitionCreationFlags flags =
       1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);

@@ -63,7 +75,13 @@ Yolo::createEngine(nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config
   nvonnxparser::IParser* parser;
   if (m_NetworkType == "onnx") {
+#if NV_TENSORRT_MAJOR >= 8
     parser = nvonnxparser::createParser(*network, *builder->getLogger());
+#else
+    parser = nvonnxparser::createParser(*network, logger);
+#endif
     if (!parser->parseFromFile(m_OnnxWtsFilePath.c_str(), static_cast<INT>(nvinfer1::ILogger::Severity::kWARNING))) {
       std::cerr << "\nCould not parse the ONNX model\n" << std::endl;

@@ -72,6 +90,7 @@ Yolo::createEngine(nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config
       delete network;
 #else
       parser->destroy();
+      config->destroy();
       network->destroy();
 #endif

@@ -89,6 +108,7 @@ Yolo::createEngine(nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config
 #if NV_TENSORRT_MAJOR >= 8
   delete network;
 #else
+  config->destroy();
   network->destroy();
 #endif

@@ -170,6 +190,7 @@ Yolo::createEngine(nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config
   if (m_NetworkType == "onnx") {
     parser->destroy();
   }
+  config->destroy();
   network->destroy();
 #endif

@@ -220,6 +241,7 @@ Yolo::createEngine(nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config
   if (m_NetworkType == "onnx") {
     parser->destroy();
   }
+  config->destroy();
   network->destroy();
 #endif
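
**NOTE**: for readers skimming the diff, this is a condensed sketch of what the pre-TensorRT-8 branch above does; the helper name `makeLegacyBuilderConfig` is illustrative only and does not exist in the repository:
```
#include <NvInfer.h>
#include <cstddef>
#include <cstdint>

// On TensorRT < 8 DeepStream does not hand the custom engine builder an
// IBuilderConfig, so one is created locally and the workspace-size value
// from the config file (in MiB) is applied before the engine is built.
static nvinfer1::IBuilderConfig*
makeLegacyBuilderConfig(nvinfer1::IBuilder* builder, uint32_t workspaceSizeMiB)
{
  nvinfer1::IBuilderConfig* config = builder->createBuilderConfig();
  if (workspaceSizeMiB > 0)
    config->setMaxWorkspaceSize(static_cast<size_t>(workspaceSizeMiB) * 1024 * 1024);
  return config;
}
```
The `* 1024 * 1024` factor is why the commented `#workspace-size=1000` entries in the config files read as 1000 MiB.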

View File

@@ -42,9 +42,16 @@
 #include "layers/reorg_layer.h"

 #if NV_TENSORRT_MAJOR >= 8
 #define INT int32_t
 #else
 #define INT int
+
+static class Logger : public nvinfer1::ILogger {
+  void log(nvinfer1::ILogger::Severity severity, const char* msg) noexcept override {
+    if (severity <= nvinfer1::ILogger::Severity::kWARNING)
+      std::cout << msg << std::endl;
+  }
+} logger;
 #endif

 struct NetworkInfo

@@ -64,6 +71,7 @@ struct NetworkInfo
   std::string networkMode;
   float scaleFactor;
   const float* offsets;
+  uint workspaceSize;
 };

 struct TensorInfo

@@ -92,7 +100,11 @@ class Yolo : public IModelParser {
   NvDsInferStatus parseModel(nvinfer1::INetworkDefinition& network) override;

+#if NV_TENSORRT_MAJOR >= 8
   nvinfer1::ICudaEngine* createEngine(nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config);
+#else
+  nvinfer1::ICudaEngine* createEngine(nvinfer1::IBuilder* builder);
+#endif

 protected:
   const std::string m_InputBlobName;

@@ -110,6 +122,7 @@ class Yolo : public IModelParser {
   const std::string m_NetworkMode;
   const float m_ScaleFactor;
   const float* m_Offsets;
+  const uint m_WorkspaceSize;

   uint m_InputC;
   uint m_InputH;