@@ -63,7 +63,7 @@ config-file=pgie/config_infer_primary.txt
|
|||||||
enable=1
|
enable=1
|
||||||
gpu-id=0
|
gpu-id=0
|
||||||
gie-unique-id=2
|
gie-unique-id=2
|
||||||
operate-on-gie-id=1
|
#operate-on-gie-id=1
|
||||||
#operate-on-class-ids=0
|
#operate-on-class-ids=0
|
||||||
nvbuf-memory-type=0
|
nvbuf-memory-type=0
|
||||||
config-file=sgie1/config_infer_secondary1.txt
|
config-file=sgie1/config_infer_secondary1.txt
|
||||||
|
|||||||
@@ -2,10 +2,10 @@
|
|||||||
gpu-id=0
|
gpu-id=0
|
||||||
net-scale-factor=0.0039215697906911373
|
net-scale-factor=0.0039215697906911373
|
||||||
model-color-format=0
|
model-color-format=0
|
||||||
custom-network-config=yolo_pgie.cfg
|
custom-network-config=yolo.cfg
|
||||||
model-file=yolo_pgie.weights
|
model-file=yolo.weights
|
||||||
model-engine-file=pgie_b1_gpu0_fp16.engine
|
model-engine-file=model_b1_gpu0_fp16.engine
|
||||||
labelfile-path=labels_pgie.txt
|
labelfile-path=labels.txt
|
||||||
batch-size=1
|
batch-size=1
|
||||||
network-mode=2
|
network-mode=2
|
||||||
num-detected-classes=2
|
num-detected-classes=2
|
||||||
@@ -31,7 +31,7 @@ CC:= g++
|
|||||||
NVCC:=/usr/local/cuda-$(CUDA_VER)/bin/nvcc
|
NVCC:=/usr/local/cuda-$(CUDA_VER)/bin/nvcc
|
||||||
|
|
||||||
CFLAGS:= -Wall -std=c++11 -shared -fPIC -Wno-error=deprecated-declarations
|
CFLAGS:= -Wall -std=c++11 -shared -fPIC -Wno-error=deprecated-declarations
|
||||||
CFLAGS+= -I../../includes -I/usr/local/cuda-$(CUDA_VER)/include
|
CFLAGS+= -I../../../includes -I/usr/local/cuda-$(CUDA_VER)/include
|
||||||
|
|
||||||
LIBS:= -lnvinfer_plugin -lnvinfer -lnvparsers -L/usr/local/cuda-$(CUDA_VER)/lib64 -lcudart -lcublas -lstdc++fs
|
LIBS:= -lnvinfer_plugin -lnvinfer -lnvparsers -L/usr/local/cuda-$(CUDA_VER)/lib64 -lcudart -lcublas -lstdc++fs
|
||||||
LFLAGS:= -shared -Wl,--start-group $(LIBS) -Wl,--end-group
|
LFLAGS:= -shared -Wl,--start-group $(LIBS) -Wl,--end-group
|
||||||
@@ -2,16 +2,18 @@
|
|||||||
gpu-id=0
|
gpu-id=0
|
||||||
net-scale-factor=0.0039215697906911373
|
net-scale-factor=0.0039215697906911373
|
||||||
model-color-format=0
|
model-color-format=0
|
||||||
custom-network-config=yolo_sgie1.cfg
|
custom-network-config=yolo.cfg
|
||||||
model-file=yolo_sgie1.weights
|
model-file=yolo.weights
|
||||||
model-engine-file=sgie1_b16_gpu0_fp16.engine
|
model-engine-file=model_b1_gpu0_fp16.engine
|
||||||
labelfile-path=labels_sgie1.txt
|
labelfile-path=labels.txt
|
||||||
batch-size=16
|
batch-size=16
|
||||||
network-mode=2
|
network-mode=2
|
||||||
num-detected-classes=10
|
num-detected-classes=10
|
||||||
interval=0
|
interval=0
|
||||||
gie-unique-id=2
|
gie-unique-id=2
|
||||||
process-mode=2
|
process-mode=2
|
||||||
|
#operate-on-gie-id=1
|
||||||
|
#operate-on-class-ids=0
|
||||||
network-type=0
|
network-type=0
|
||||||
cluster-mode=4
|
cluster-mode=4
|
||||||
maintain-aspect-ratio=0
|
maintain-aspect-ratio=0
|
||||||
@@ -0,0 +1,71 @@
|
|||||||
|
################################################################################
|
||||||
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
||||||
|
#
|
||||||
|
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
|
# copy of this software and associated documentation files (the "Software"),
|
||||||
|
# to deal in the Software without restriction, including without limitation
|
||||||
|
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||||
|
# and/or sell copies of the Software, and to permit persons to whom the
|
||||||
|
# Software is furnished to do so, subject to the following conditions:
|
||||||
|
#
|
||||||
|
# The above copyright notice and this permission notice shall be included in
|
||||||
|
# all copies or substantial portions of the Software.
|
||||||
|
#
|
||||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||||
|
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||||
|
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||||
|
# DEALINGS IN THE SOFTWARE.
|
||||||
|
#
|
||||||
|
# Edited by Marcos Luciano
|
||||||
|
# https://www.github.com/marcoslucianops
|
||||||
|
################################################################################
|
||||||
|
|
||||||
|
CUDA_VER?=
|
||||||
|
ifeq ($(CUDA_VER),)
|
||||||
|
$(error "CUDA_VER is not set")
|
||||||
|
endif
|
||||||
|
CC:= g++
|
||||||
|
NVCC:=/usr/local/cuda-$(CUDA_VER)/bin/nvcc
|
||||||
|
|
||||||
|
CFLAGS:= -Wall -std=c++11 -shared -fPIC -Wno-error=deprecated-declarations
|
||||||
|
CFLAGS+= -I../../../includes -I/usr/local/cuda-$(CUDA_VER)/include
|
||||||
|
|
||||||
|
LIBS:= -lnvinfer_plugin -lnvinfer -lnvparsers -L/usr/local/cuda-$(CUDA_VER)/lib64 -lcudart -lcublas -lstdc++fs
|
||||||
|
LFLAGS:= -shared -Wl,--start-group $(LIBS) -Wl,--end-group
|
||||||
|
|
||||||
|
INCS:= $(wildcard *.h)
|
||||||
|
SRCFILES:= nvdsinfer_yolo_engine.cpp \
|
||||||
|
nvdsparsebbox_Yolo.cpp \
|
||||||
|
yoloPlugins.cpp \
|
||||||
|
layers/convolutional_layer.cpp \
|
||||||
|
layers/dropout_layer.cpp \
|
||||||
|
layers/shortcut_layer.cpp \
|
||||||
|
layers/route_layer.cpp \
|
||||||
|
layers/upsample_layer.cpp \
|
||||||
|
layers/maxpool_layer.cpp \
|
||||||
|
layers/activation_layer.cpp \
|
||||||
|
utils.cpp \
|
||||||
|
yolo.cpp \
|
||||||
|
yoloForward.cu
|
||||||
|
TARGET_LIB:= libnvdsinfer_custom_impl_Yolo.so
|
||||||
|
|
||||||
|
TARGET_OBJS:= $(SRCFILES:.cpp=.o)
|
||||||
|
TARGET_OBJS:= $(TARGET_OBJS:.cu=.o)
|
||||||
|
|
||||||
|
all: $(TARGET_LIB)
|
||||||
|
|
||||||
|
%.o: %.cpp $(INCS) Makefile
|
||||||
|
$(CC) -c -o $@ $(CFLAGS) $<
|
||||||
|
|
||||||
|
%.o: %.cu $(INCS) Makefile
|
||||||
|
$(NVCC) -c -o $@ --compiler-options '-fPIC' $<
|
||||||
|
|
||||||
|
$(TARGET_LIB) : $(TARGET_OBJS)
|
||||||
|
$(CC) -o $@ $(TARGET_OBJS) $(LFLAGS)
|
||||||
|
|
||||||
|
clean:
|
||||||
|
rm -rf $(TARGET_LIB)
|
||||||
|
rm -rf $(TARGET_OBJS)
|
||||||
@@ -4,44 +4,57 @@ How to use multiples GIE's on DeepStream
|
|||||||
##
|
##
|
||||||
|
|
||||||
1. Download [my native folder](https://github.com/marcoslucianops/DeepStream-Yolo/tree/master/native), rename to yolo and move to your deepstream/sources folder.
|
1. Download [my native folder](https://github.com/marcoslucianops/DeepStream-Yolo/tree/master/native), rename to yolo and move to your deepstream/sources folder.
|
||||||
2. Copy each obj.names to deepstream/sources/yolo directory, renaming file to labels_*.txt (* = pgie/sgie1/sgie2/etc), according to each inference type.
|
2. Make a folder, in deepstream/sources/yolo directory, named pgie (where you will put files of primary inference).
|
||||||
3. Copy each yolo.cfg and yolo.weights files to deepstream/sources/yolo directory, renaming files to yolo_*.cfg and yolo_*.weights (* = pgie/sgie1/sgie2/etc), according to each inference type.
|
3. Make a folder, for each secondary inference, in deepstream/sources/yolo directory, named sgie* (* = 1, 2, 3, etc.; depending on the number of secondary inferences; where you will put files of others inferences).
|
||||||
4. Make a copy of config_infer_primary.txt file and rename it to config_infer_secondary*.txt (* = 1/2/3/etc), according to inference order.
|
4. Copy and rename each obj.names file to labels.txt in each inference directory (pgie, sgie*), according to each inference type.
|
||||||
5. Edit DeepStream for your custom model, according to each yolo_*.cfg (* = pgie/sgie1/sgie2/etc) file: https://github.com/marcoslucianops/DeepStream-Yolo/blob/master/customModels.md
|
5. Copy your yolo.cfg and yolo.weights files to each inference directory (pgie, sgie*), according to each inference type.
|
||||||
|
6. Move nvdsinfer_custom_impl_Yolo folder and config_infer_primary.txt file to each inference directory (pgie, sgie*; for sgie's, rename config_infer_primary to config_infer_secondary*; * = 1, 2, 3, etc.)
|
||||||
|
7. Edit DeepStream for your custom model, according to each yolo.cfg file: https://github.com/marcoslucianops/DeepStream-Yolo/blob/master/customModels.md
|
||||||
|
|
||||||
**In example folder, on this repository, have all example files to multiple YOLO inferences.**
|
**In example folder, on this repository, have all example files to multiple YOLO inferences.**
|
||||||
|
|
||||||
##
|
##
|
||||||
|
|
||||||
|
### Editing Makefile
|
||||||
|
To compile nvdsinfer_custom_impl_Yolo without errors, it is necessary to edit the Makefile (line 34) in the nvdsinfer_custom_impl_Yolo folder in each inference directory.
|
||||||
|
```
|
||||||
|
CFLAGS+= -I../../includes -I/usr/local/cuda-$(CUDA_VER)/include
|
||||||
|
```
|
||||||
|
To:
|
||||||
|
```
|
||||||
|
CFLAGS+= -I../../../includes -I/usr/local/cuda-$(CUDA_VER)/include
|
||||||
|
```
|
||||||
|
|
||||||
|
##
|
||||||
|
|
||||||
### Compiling edited models
|
### Compiling edited models
|
||||||
1. Check your CUDA version (nvcc --version)
|
1. Check your CUDA version (nvcc --version)
|
||||||
2. Go to deepstream/sources/yolo directory.
|
2. Go to inference directory.
|
||||||
3. Type command (example for CUDA 10.2 version):
|
3. Type command (example for CUDA 10.2 version):
|
||||||
|
|
||||||
```
|
```
|
||||||
CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo
|
CUDA_VER=10.2 make -C nvdsinfer_custom_impl_Yolo
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Do this for each GIE!**
|
||||||
|
|
||||||
##
|
##
|
||||||
|
|
||||||
### Add secondary-gie to deepstream_app_config after primary-gie
|
### Add secondary-gie to deepstream_app_config after primary-gie
|
||||||
|
|
||||||
Example for 1 secondary-gie (2 inferences):
|
Example for 1 secondary-gie (2 inferences):
|
||||||
|
|
||||||
```
|
```
|
||||||
[secondary-gie0]
|
[secondary-gie0]
|
||||||
enable=1
|
enable=1
|
||||||
gpu-id=0
|
gpu-id=0
|
||||||
gie-unique-id=2
|
gie-unique-id=2
|
||||||
|
# If you want the secondary inference to operate on a specified GIE id (gie-unique-id you want to operate on: 1, 2, etc; comment it if you don't want to use)
|
||||||
operate-on-gie-id=1
|
operate-on-gie-id=1
|
||||||
# If you want the secondary inference to operate on specified class ids of a GIE (class ids you want to operate on: 1, 1;2, 2;3;4, 3 etc; comment it if you don't want to use)
|
# If you want the secondary inference to operate on specified class ids of a GIE (class ids you want to operate on: 1, 1;2, 2;3;4, 3 etc; comment it if you don't want to use)
|
||||||
operate-on-class-ids=0
|
operate-on-class-ids=0
|
||||||
nvbuf-memory-type=0
|
nvbuf-memory-type=0
|
||||||
config-file=config_infer_secondary1.txt
|
config-file=sgie1/config_infer_secondary1.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
Example for 2 secondary-gie (3 inferences):
|
Example for 2 secondary-gie (3 inferences):
|
||||||
|
|
||||||
```
|
```
|
||||||
[secondary-gie0]
|
[secondary-gie0]
|
||||||
enable=1
|
enable=1
|
||||||
@@ -50,7 +63,7 @@ gie-unique-id=2
|
|||||||
operate-on-gie-id=1
|
operate-on-gie-id=1
|
||||||
operate-on-class-ids=0
|
operate-on-class-ids=0
|
||||||
nvbuf-memory-type=0
|
nvbuf-memory-type=0
|
||||||
config-file=config_infer_secondary1.txt
|
config-file=sgie1/config_infer_secondary1.txt
|
||||||
|
|
||||||
[secondary-gie1]
|
[secondary-gie1]
|
||||||
enable=1
|
enable=1
|
||||||
@@ -59,40 +72,51 @@ gie-unique-id=3
|
|||||||
operate-on-gie-id=1
|
operate-on-gie-id=1
|
||||||
operate-on-class-ids=0
|
operate-on-class-ids=0
|
||||||
nvbuf-memory-type=0
|
nvbuf-memory-type=0
|
||||||
config-file=config_infer_secondary2.txt
|
config-file=sgie2/config_infer_secondary2.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
Note: remember to edit primary-gie
|
||||||
|
```
|
||||||
|
[primary-gie]
|
||||||
|
enable=1
|
||||||
|
gpu-id=0
|
||||||
|
gie-unique-id=1
|
||||||
|
nvbuf-memory-type=0
|
||||||
|
config-file=config_infer_primary.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
to
|
||||||
|
```
|
||||||
|
[primary-gie]
|
||||||
|
enable=1
|
||||||
|
gpu-id=0
|
||||||
|
gie-unique-id=1
|
||||||
|
nvbuf-memory-type=0
|
||||||
|
config-file=pgie/config_infer_primary.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
##
|
##
|
||||||
|
|
||||||
### Editing config_infer
|
### Editing config_infer
|
||||||
|
|
||||||
* Edit config_infer (config_infer_primary, config_infer_secondary1, etc.) files
|
* Edit path of config (config_infer_primary, config_infer_secondary1, etc.) files
|
||||||
|
|
||||||
Example for primary
|
Example for primary
|
||||||
|
|
||||||
```
|
```
|
||||||
custom-network-config=yolo_pgie.cfg
|
custom-network-config=pgie/yolo.cfg
|
||||||
model-file=yolo_pgie.weights
|
|
||||||
model-engine-file=pgie_b16_gpu0_fp16.engine
|
|
||||||
labelfile-path=labels_pgie.txt
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Example for secondary1
|
Example for secondary1
|
||||||
|
|
||||||
```
|
```
|
||||||
custom-network-config=yolo_sgie1.cfg
|
custom-network-config=sgie1/yolo.cfg
|
||||||
model-file=yolo_sgie1.weights
|
|
||||||
model-engine-file=sgie1_b16_gpu0_fp16.engine
|
|
||||||
labelfile-path=labels_sgie1.txt
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Example for secondary2
|
Example for secondary2
|
||||||
|
|
||||||
```
|
```
|
||||||
custom-network-config=yolo_sgie2.cfg
|
custom-network-config=sgie2/yolo.cfg
|
||||||
model-file=yolo_sgie2.weights
|
|
||||||
model-engine-file=sgie2_b16_gpu0_fp16.engine
|
|
||||||
labelfile-path=labels_sgie2.txt
|
|
||||||
```
|
```
|
||||||
|
|
||||||
##
|
##
|
||||||
@@ -137,6 +161,22 @@ Example for all secondary:
|
|||||||
batch-size=16
|
batch-size=16
|
||||||
```
|
```
|
||||||
|
|
||||||
|
##
|
||||||
|
|
||||||
|
* If you want the secondary inference to operate on a specified GIE id (gie-unique-id you want to operate on: 1, 2, etc.)
|
||||||
|
|
||||||
|
```
|
||||||
|
operate-on-gie-id=1
|
||||||
|
```
|
||||||
|
|
||||||
|
##
|
||||||
|
|
||||||
|
* If you want the secondary inference to operate on specified class ids of a GIE (class ids you want to operate on: 1, 1;2, 2;3;4, 3 etc.)
|
||||||
|
|
||||||
|
```
|
||||||
|
operate-on-class-ids=0
|
||||||
|
```
|
||||||
|
|
||||||
### Testing model
|
### Testing model
|
||||||
To run your custom YOLO model, use this command
|
To run your custom YOLO model, use this command
|
||||||
|
|
||||||
@@ -144,4 +184,4 @@ To run your custom YOLO model, use this command
|
|||||||
deepstream-app -c deepstream_app_config.txt
|
deepstream-app -c deepstream_app_config.txt
|
||||||
```
|
```
|
||||||
|
|
||||||
** During test process, engine file will be generated. When engine build process is done, rename engine file according to each configured engine name pgie/sgie1/sgie2/etc) in config_infer file.
|
**During the test process, an engine file will be generated. When the engine build process is done, move the engine file to the respective GIE folder (pgie, sgie1, etc.)**
|
||||||
|
|||||||
Reference in New Issue
Block a user