Add DeepStream 5.1 support

README.md
@@ -1,6 +1,6 @@
 # DeepStream-Yolo
 
-NVIDIA DeepStream SDK 6.2 / 6.1.1 / 6.1 / 6.0.1 / 6.0 configuration for YOLO models
+NVIDIA DeepStream SDK 6.2 / 6.1.1 / 6.1 / 6.0.1 / 6.0 / 5.1 configuration for YOLO models
 
 --------------------------------------------------------------------------------------------------
 ### Important: please generate the ONNX model and the TensorRT engine again with the updated files
@@ -21,8 +21,9 @@ NVIDIA DeepStream SDK 6.2 / 6.1.1 / 6.1 / 6.0.1 / 6.0 configuration for YOLO mod
 * Models benchmarks
 * **Support for Darknet YOLO models (YOLOv4, etc) using cfg and weights conversion with GPU post-processing**
 * **Support for YOLO-NAS, PPYOLOE+, PPYOLOE, DAMO-YOLO, YOLOX, YOLOR, YOLOv8, YOLOv7, YOLOv6 and YOLOv5 using ONNX conversion with GPU post-processing**
-* **Add GPU bbox parser (it is slightly slower than CPU bbox parser on V100 GPU tests)**
+* **GPU bbox parser (it is slightly slower than CPU bbox parser on V100 GPU tests)**
 * **Dynamic batch-size for ONNX exported models (YOLO-NAS, PPYOLOE+, PPYOLOE, DAMO-YOLO, YOLOX, YOLOR, YOLOv8, YOLOv7, YOLOv6 and YOLOv5)**
+* **Support for DeepStream 5.1**
 
 ##
 
@@ -92,6 +93,16 @@ NVIDIA DeepStream SDK 6.2 / 6.1.1 / 6.1 / 6.0.1 / 6.0 configuration for YOLO mod
 * [GStreamer 1.14.5](https://gstreamer.freedesktop.org/)
 * [DeepStream-Yolo](https://github.com/marcoslucianops/DeepStream-Yolo)
 
+#### DeepStream 5.1 on x86 platform
+
+* [Ubuntu 18.04](https://releases.ubuntu.com/18.04.6/)
+* [CUDA 11.1](https://developer.nvidia.com/cuda-11.1.0-download-archive?target_os=Linux&target_arch=x86_64&target_distro=Ubuntu&target_version=1804&target_type=runfilelocal)
+* [TensorRT 7.2.2](https://developer.nvidia.com/nvidia-tensorrt-7x-download)
+* [NVIDIA Driver 460.32.03](https://www.nvidia.com.br/Download/index.aspx)
+* [NVIDIA DeepStream SDK 5.1](https://developer.nvidia.com/deepstream-sdk-download-tesla-archived)
+* [GStreamer 1.14.5](https://gstreamer.freedesktop.org/)
+* [DeepStream-Yolo](https://github.com/marcoslucianops/DeepStream-Yolo)
+
 #### DeepStream 6.2 on Jetson platform
 
 * [JetPack 5.1.1 / 5.1](https://developer.nvidia.com/embedded/jetpack)
@@ -116,6 +127,12 @@ NVIDIA DeepStream SDK 6.2 / 6.1.1 / 6.1 / 6.0.1 / 6.0 configuration for YOLO mod
 * [NVIDIA DeepStream SDK 6.0.1 / 6.0](https://developer.nvidia.com/embedded/deepstream-on-jetson-downloads-archived)
 * [DeepStream-Yolo](https://github.com/marcoslucianops/DeepStream-Yolo)
 
+#### DeepStream 5.1 on Jetson platform
+
+* [JetPack 4.5.1](https://developer.nvidia.com/embedded/jetpack-sdk-451-archive)
+* [NVIDIA DeepStream SDK 5.1](https://developer.nvidia.com/embedded/deepstream-on-jetson-downloads-archived)
+* [DeepStream-Yolo](https://github.com/marcoslucianops/DeepStream-Yolo)
+
 ##
 
 ### Supported models
@@ -950,6 +967,12 @@ cd DeepStream-Yolo
 CUDA_VER=11.4 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on x86 platform
+
+```
+CUDA_VER=11.1 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 * DeepStream 6.2 / 6.1.1 / 6.1 on Jetson platform
 
 ```
@@ -962,6 +985,12 @@ cd DeepStream-Yolo
 CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on Jetson platform
+
+```
+CUDA_VER=10.2 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 #### 4. Edit the `config_infer_primary.txt` file according to your model (example for YOLOv4)
 
 ```
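The `LEGACY=1` switch introduced here only sets a compile-time define; DeepStream 5.1 ships TensorRT 7.2.2 (see the requirements above), whose API differs from TensorRT 8.x. A commented sketch of the x86 invocation:

```
# LEGACY=1 -> the Makefile appends -DLEGACY to the compile flags (see the
# Makefile hunk further down), selecting the TensorRT 7.x code paths.
CUDA_VER=11.1 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
```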
@@ -1073,6 +1102,12 @@ sudo apt-get install libopencv-dev
 CUDA_VER=11.4 OPENCV=1 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on x86 platform
+
+```
+CUDA_VER=11.1 OPENCV=1 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 * DeepStream 6.2 / 6.1.1 / 6.1 on Jetson platform
 
 ```
@@ -1085,6 +1120,12 @@ sudo apt-get install libopencv-dev
 CUDA_VER=10.2 OPENCV=1 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on Jetson platform
+
+```
+CUDA_VER=10.2 OPENCV=1 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 #### 3. For COCO dataset, download the [val2017](https://drive.google.com/file/d/1gbvfn7mcsGDRZ_luJwtITL-ru2kK99aK/view?usp=sharing), extract, and move to DeepStream-Yolo folder
 
 * Select 1000 random images from COCO dataset to run calibration
@@ -43,6 +43,12 @@ Generate the ONNX model file (example for DAMO-YOLO-S*)
 python3 export_damoyolo.py -w damoyolo_tinynasL25_S_477.pth -c configs/damoyolo_tinynasL25_S.py --simplify --dynamic
 ```
 
+**NOTE**: If you are using DeepStream 5.1, use opset 11 or lower.
+
+```
+--opset 11
+```
+
 **NOTE**: To change the inference size (default: 640)
 
 ```
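Appended to the export command above, the full DeepStream 5.1 invocation would be (a sketch using the example weights and config):

```
python3 export_damoyolo.py -w damoyolo_tinynasL25_S_477.pth -c configs/damoyolo_tinynasL25_S.py --simplify --dynamic --opset 11
```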
@@ -98,6 +104,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=11.4 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on x86 platform
+
+```
+CUDA_VER=11.1 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 * DeepStream 6.2 / 6.1.1 / 6.1 on Jetson platform
 
 ```
@@ -110,6 +122,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on Jetson platform
+
+```
+CUDA_VER=10.2 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 ##
 
 ### Edit the config_infer_primary_damoyolo file
@@ -41,6 +41,12 @@ pip3 install onnx onnxsim onnxruntime
 python3 export_ppyoloe.py -w ppyoloe_plus_crn_s_80e_coco.pdparams -c configs/ppyoloe/ppyoloe_plus_crn_s_80e_coco.yml --simplify
 ```
 
+**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 11.
+
+```
+--opset 12
+```
+
 #### 5. Copy generated files
 
 Copy the generated ONNX model file and labels.txt file (if generated) to the `DeepStream-Yolo` folder.
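With the flag appended, the full DeepStream 5.1 export would be (a sketch using the example weights and config):

```
python3 export_ppyoloe.py -w ppyoloe_plus_crn_s_80e_coco.pdparams -c configs/ppyoloe/ppyoloe_plus_crn_s_80e_coco.yml --simplify --opset 12
```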
@@ -75,6 +81,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=11.4 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on x86 platform
+
+```
+CUDA_VER=11.1 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 * DeepStream 6.2 / 6.1.1 / 6.1 on Jetson platform
 
 ```
@@ -87,6 +99,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on Jetson platform
+
+```
+CUDA_VER=10.2 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 ##
 
 ### Edit the config_infer_primary_ppyoloe_plus file
@@ -46,6 +46,12 @@ Generate the ONNX model file (example for YOLO-NAS S)
 python3 export_yolonas.py -m yolo_nas_s -w yolo_nas_s_coco.pth --simplify --dynamic
 ```
 
+**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 14.
+
+```
+--opset 12
+```
+
 **NOTE**: Model names
 
 ```
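So the full DeepStream 5.1 export command would look like (a sketch with the example model and weights):

```
python3 export_yolonas.py -m yolo_nas_s -w yolo_nas_s_coco.pth --simplify --dynamic --opset 12
```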
@@ -119,6 +125,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=11.4 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on x86 platform
+
+```
+CUDA_VER=11.1 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 * DeepStream 6.2 / 6.1.1 / 6.1 on Jetson platform
 
 ```
@@ -131,6 +143,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on Jetson platform
+
+```
+CUDA_VER=10.2 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 ##
 
 ### Edit the config_infer_primary_yolonas file
@@ -55,6 +55,12 @@ Generate the ONNX model file
 python3 export_yolor.py -w yolor-p6.pt --simplify --dynamic
 ```
 
+**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 12.
+
+```
+--opset 12
+```
+
 **NOTE**: To convert a P6 model
 
 ```
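Appended to the export command above, a DeepStream 5.1 export sketch with the example weights:

```
python3 export_yolor.py -w yolor-p6.pt --simplify --dynamic --opset 12
```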
@@ -116,6 +122,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=11.4 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on x86 platform
+
+```
+CUDA_VER=11.1 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 * DeepStream 6.2 / 6.1.1 / 6.1 on Jetson platform
 
 ```
@@ -128,6 +140,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on Jetson platform
+
+```
+CUDA_VER=10.2 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 ##
 
 ### Edit the config_infer_primary_yolor file
@@ -46,6 +46,12 @@ Generate the ONNX model file (example for YOLOX-s)
 python3 export_yolox.py -w yolox_s.pth -c exps/default/yolox_s.py --simplify --dynamic
 ```
 
+**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 11.
+
+```
+--opset 12
+```
+
 #### 5. Copy generated file
 
 Copy the generated ONNX model file to the `DeepStream-Yolo` folder.
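With the flag appended, the full DeepStream 5.1 export would be (a sketch using the example weights and config):

```
python3 export_yolox.py -w yolox_s.pth -c exps/default/yolox_s.py --simplify --dynamic --opset 12
```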
@@ -80,6 +86,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=11.4 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on x86 platform
+
+```
+CUDA_VER=11.1 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 * DeepStream 6.2 / 6.1.1 / 6.1 on Jetson platform
 
 ```
@@ -92,6 +104,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on Jetson platform
+
+```
+CUDA_VER=10.2 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 ##
 
 ### Edit the config_infer_primary_yolox file
@@ -47,6 +47,12 @@ Generate the ONNX model file (example for YOLOv5s)
 python3 export_yoloV5.py -w yolov5s.pt --simplify --dynamic
 ```
 
+**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 17.
+
+```
+--opset 12
+```
+
 **NOTE**: To convert a P6 model
 
 ```
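So a full DeepStream 5.1 export would look like (a sketch with the example weights):

```
python3 export_yoloV5.py -w yolov5s.pt --simplify --dynamic --opset 12
```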
@@ -108,6 +114,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=11.4 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on x86 platform
+
+```
+CUDA_VER=11.1 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 * DeepStream 6.2 / 6.1.1 / 6.1 on Jetson platform
 
 ```
@@ -120,6 +132,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on Jetson platform
+
+```
+CUDA_VER=10.2 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 ##
 
 ### Edit the config_infer_primary_yoloV5 file
@@ -47,6 +47,12 @@ Generate the ONNX model file (example for YOLOv6-S 4.0)
 python3 export_yoloV6.py -w yolov6s.pt --simplify --dynamic
 ```
 
+**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 13.
+
+```
+--opset 12
+```
+
 **NOTE**: To convert a P6 model
 
 ```
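Appended to the export command above, a DeepStream 5.1 export sketch with the example weights:

```
python3 export_yoloV6.py -w yolov6s.pt --simplify --dynamic --opset 12
```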
@@ -108,6 +114,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=11.4 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on x86 platform
+
+```
+CUDA_VER=11.1 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 * DeepStream 6.2 / 6.1.1 / 6.1 on Jetson platform
 
 ```
@@ -120,6 +132,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on Jetson platform
+
+```
+CUDA_VER=10.2 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 ##
 
 ### Edit the config_infer_primary_yoloV6 file
@@ -49,6 +49,12 @@ Generate the ONNX model file (example for YOLOv7)
 python3 export_yoloV7.py -w yolov7.pt --simplify --dynamic
 ```
 
+**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 12.
+
+```
+--opset 12
+```
+
 **NOTE**: To convert a P6 model
 
 ```
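So a full DeepStream 5.1 export would look like (a sketch with the example weights):

```
python3 export_yoloV7.py -w yolov7.pt --simplify --dynamic --opset 12
```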
@@ -110,6 +116,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=11.4 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on x86 platform
+
+```
+CUDA_VER=11.1 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 * DeepStream 6.2 / 6.1.1 / 6.1 on Jetson platform
 
 ```
@@ -122,6 +134,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on Jetson platform
+
+```
+CUDA_VER=10.2 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 ##
 
 ### Edit the config_infer_primary_yoloV7 file
@@ -46,6 +46,12 @@ Generate the ONNX model file (example for YOLOv8s)
 python3 export_yoloV8.py -w yolov8s.pt --simplify --dynamic
 ```
 
+**NOTE**: If you are using DeepStream 5.1, use opset 12 or lower. The default opset is 16.
+
+```
+--opset 12
+```
+
 **NOTE**: To change the inference size (default: 640)
 
 ```
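Appended to the export command above, a DeepStream 5.1 export sketch with the example weights:

```
python3 export_yoloV8.py -w yolov8s.pt --simplify --dynamic --opset 12
```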
@@ -101,6 +107,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=11.4 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on x86 platform
+
+```
+CUDA_VER=11.1 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 * DeepStream 6.2 / 6.1.1 / 6.1 on Jetson platform
 
 ```
@@ -113,6 +125,12 @@ Open the `DeepStream-Yolo` folder and compile the lib
 CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo
 ```
 
+* DeepStream 5.1 on Jetson platform
+
+```
+CUDA_VER=10.2 LEGACY=1 make -C nvdsinfer_custom_impl_Yolo
+```
+
 ##
 
 ### Edit the config_infer_primary_yoloV8 file
@@ -33,6 +33,11 @@ ifeq ($(OPENCV),)
 OPENCV=0
 endif
 
+LEGACY?=
+ifeq ($(LEGACY),)
+LEGACY=0
+endif
+
 CC:= g++
 NVCC:=/usr/local/cuda-$(CUDA_VER)/bin/nvcc
 
@@ -40,11 +45,15 @@ CFLAGS:= -Wall -std=c++11 -shared -fPIC -Wno-error=deprecated-declarations
 CFLAGS+= -I/opt/nvidia/deepstream/deepstream/sources/includes -I/usr/local/cuda-$(CUDA_VER)/include
 
 ifeq ($(OPENCV), 1)
-COMMON= -DOPENCV
+COMMON+= -DOPENCV
 CFLAGS+= $(shell pkg-config --cflags opencv4 2> /dev/null || pkg-config --cflags opencv)
 LIBS+= $(shell pkg-config --libs opencv4 2> /dev/null || pkg-config --libs opencv)
 endif
 
+ifeq ($(LEGACY), 1)
+COMMON+= -DLEGACY
+endif
+
 CUFLAGS:= -I/opt/nvidia/deepstream/deepstream/sources/includes -I/usr/local/cuda-$(CUDA_VER)/include
 
 LIBS+= -lnvinfer_plugin -lnvinfer -lnvparsers -L/usr/local/cuda-$(CUDA_VER)/lib64 -lcudart -lcublas -lstdc++fs
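Note the accompanying change from `COMMON=` to `COMMON+=`: since `OPENCV=1` and `LEGACY=1` can be set together, the defines must accumulate rather than overwrite whatever was assigned before. A minimal sketch of the resulting behaviour:

```
# Both switches can be active at once, so COMMON accumulates:
ifeq ($(OPENCV), 1)
COMMON+= -DOPENCV
endif
ifeq ($(LEGACY), 1)
COMMON+= -DLEGACY
endif
# With OPENCV=1 LEGACY=1 the compiler then receives -DOPENCV -DLEGACY.
```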
@@ -54,7 +54,13 @@ Yolo::createEngine(nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config
 
 nvinfer1::INetworkDefinition *network = builder->createNetworkV2(0);
 if (parseModel(*network) != NVDSINFER_SUCCESS) {
+
+#ifdef LEGACY
+    network->destroy();
+#else
     delete network;
+#endif
+
     return nullptr;
 }
 
@@ -105,7 +111,12 @@ Yolo::createEngine(nvinfer1::IBuilder* builder, nvinfer1::IBuilderConfig* config
 else
     std::cerr << "Building engine failed\n" << std::endl;
 
-delete network;
+#ifdef LEGACY
+network->destroy();
+#else
+delete network;
+#endif
+
 return engine;
 }
 
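For context: TensorRT 7.x (the version shipped with DeepStream 5.1) keeps the destructors of builder and network objects non-public and releases them through `destroy()`, while TensorRT 8.x deprecated `destroy()` in favour of plain `delete`. A hypothetical helper illustrating the pattern the `#ifdef` inlines above:

```
#include "NvInfer.h"

// Release a TensorRT object on either API generation.
// LEGACY is defined (via LEGACY=1) when building for TensorRT 7.x.
template <typename T>
static void destroyTrtObject(T* obj)
{
#ifdef LEGACY
    obj->destroy();  // TensorRT 7.x: non-public destructor, release via destroy()
#else
    delete obj;      // TensorRT 8.x: destroy() is deprecated, plain delete works
#endif
}
```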
@@ -120,9 +120,14 @@ YoloLayer::configureWithFormat(const nvinfer1::Dims* inputDims, int nbInputs, co
     assert(inputDims != nullptr);
 }
 
+#ifdef LEGACY
+int
+YoloLayer::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream)
+#else
 int32_t
 YoloLayer::enqueue(int batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream)
 noexcept
+#endif
 {
     void* output = outputs[0];
     CUDA_CHECK(cudaMemsetAsync((float*) output, 0, sizeof(float) * m_OutputSize * 6 * batchSize, stream));
@@ -71,8 +71,12 @@ class YoloLayer : public nvinfer1::IPluginV2 {
         return maxBatchSize * sizeof(int);
     }
 
+#ifdef LEGACY
+    int enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) override;
+#else
     int32_t enqueue(int batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream)
         noexcept override;
+#endif
 
     size_t getSerializationSize() const noexcept override;
 
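The duplicated signatures exist because TensorRT 8.x const-qualified the buffer parameters and added `noexcept` to the plugin virtuals, so one method body cannot override the base-class declaration of both generations. A compilable sketch of how the two generations shape `enqueue` (paraphrased from memory, not copied from the headers; check the NvInfer headers of your install for the exact form):

```
#include <cstdint>
#include <cuda_runtime_api.h>

// Approximate shape of nvinfer1::IPluginV2::enqueue in TensorRT 7.x:
struct PluginLikeTrt7 {
    virtual int enqueue(int batchSize, const void* const* inputs, void** outputs,
                        void* workspace, cudaStream_t stream) = 0;
    virtual ~PluginLikeTrt7() = default;
};

// Approximate shape in TensorRT 8.x (const-qualified buffers, noexcept):
struct PluginLikeTrt8 {
    virtual int32_t enqueue(int32_t batchSize, void const* const* inputs,
                            void* const* outputs, void* workspace,
                            cudaStream_t stream) noexcept = 0;
    virtual ~PluginLikeTrt8() = default;
};
```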