From 5b4505782186416eb8f114e256f7631dc38db69e Mon Sep 17 00:00:00 2001
From: Marcos Luciano
Date: Thu, 31 Dec 2020 01:32:42 -0300
Subject: [PATCH] Fixed multipleInferences

Updated and fixed multipleInferences files
---
 .../deepstream_app_config.txt                |   2 +-
 .../pgie/config_infer_primary.txt            |   2 +-
 .../nvdsinfer_custom_impl_Yolo/yoloPlugins.h | 155 ++++++++++++++++++
 .../sgie1/config_infer_secondary1.txt        |   6 +-
 .../nvdsinfer_custom_impl_Yolo/yoloPlugins.h | 155 ++++++++++++++++++
 multipleInferences.md                        |  31 ++--
 6 files changed, 328 insertions(+), 23 deletions(-)
 create mode 100644 examples/multiple_inferences/pgie/nvdsinfer_custom_impl_Yolo/yoloPlugins.h
 create mode 100644 examples/multiple_inferences/sgie1/nvdsinfer_custom_impl_Yolo/yoloPlugins.h

diff --git a/examples/multiple_inferences/deepstream_app_config.txt b/examples/multiple_inferences/deepstream_app_config.txt
index c3f12ab..d359a3c 100644
--- a/examples/multiple_inferences/deepstream_app_config.txt
+++ b/examples/multiple_inferences/deepstream_app_config.txt
@@ -63,7 +63,7 @@ config-file=pgie/config_infer_primary.txt
 enable=1
 gpu-id=0
 gie-unique-id=2
-#operate-on-gie-id=1
+operate-on-gie-id=1
 #operate-on-class-ids=0
 nvbuf-memory-type=0
 config-file=sgie1/config_infer_secondary1.txt
diff --git a/examples/multiple_inferences/pgie/config_infer_primary.txt b/examples/multiple_inferences/pgie/config_infer_primary.txt
index 19592e8..2a7711d 100644
--- a/examples/multiple_inferences/pgie/config_infer_primary.txt
+++ b/examples/multiple_inferences/pgie/config_infer_primary.txt
@@ -2,7 +2,7 @@
 gpu-id=0
 net-scale-factor=0.0039215697906911373
 model-color-format=0
-custom-network-config=yolo.cfg
+custom-network-config=pgie/yolo.cfg
 model-file=yolo.weights
 model-engine-file=model_b1_gpu0_fp16.engine
 labelfile-path=labels.txt
diff --git a/examples/multiple_inferences/pgie/nvdsinfer_custom_impl_Yolo/yoloPlugins.h b/examples/multiple_inferences/pgie/nvdsinfer_custom_impl_Yolo/yoloPlugins.h
new file mode 100644
index 0000000..ebf5661
--- /dev/null
+++ b/examples/multiple_inferences/pgie/nvdsinfer_custom_impl_Yolo/yoloPlugins.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+
+ * Edited by Marcos Luciano
+ * https://www.github.com/marcoslucianops
+ */
+
+#ifndef __YOLO_PLUGINS__
+#define __YOLO_PLUGINS__
+
+#include <cassert>
+#include <cstring>
+#include <cuda_runtime_api.h>
+#include <iostream>
+#include <memory>
+
+#include <vector>
+
+#include "NvInferPlugin.h"
+
+#define CHECK(status) \
+    { \
+        if (status != 0) \
+        { \
+            std::cout << "CUDA failure: " << cudaGetErrorString(status) << " in file " << __FILE__ \
+                << " at line " << __LINE__ << std::endl; \
+            abort(); \
+        } \
+    }
+
+namespace
+{
+const char* YOLOLAYER_PLUGIN_VERSION {"1"};
+const char* YOLOLAYER_PLUGIN_NAME {"YoloLayer_TRT"};
+} // namespace
+
+class YoloLayer : public nvinfer1::IPluginV2
+{
+public:
+    YoloLayer (const void* data, size_t length);
+    YoloLayer (const uint& numBoxes, const uint& numClasses, const uint& gridSize,
+        const uint model_type, const uint new_coords, const float scale_x_y, const float beta_nms,
+        const std::vector<float> anchors, const std::vector<std::vector<int>> mask);
+    const char* getPluginType () const override { return YOLOLAYER_PLUGIN_NAME; }
+    const char* getPluginVersion () const override { return YOLOLAYER_PLUGIN_VERSION; }
+    int getNbOutputs () const override { return 1; }
+
+    nvinfer1::Dims getOutputDimensions (
+        int index, const nvinfer1::Dims* inputs,
+        int nbInputDims) override;
+
+    bool supportsFormat (
+        nvinfer1::DataType type, nvinfer1::PluginFormat format) const override;
+
+    void configureWithFormat (
+        const nvinfer1::Dims* inputDims, int nbInputs,
+        const nvinfer1::Dims* outputDims, int nbOutputs,
+        nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize) override;
+
+    int initialize () override { return 0; }
+    void terminate () override {}
+    size_t getWorkspaceSize (int maxBatchSize) const override { return 0; }
+    int enqueue (
+        int batchSize, const void* const* inputs, void** outputs,
+        void* workspace, cudaStream_t stream) override;
+    size_t getSerializationSize() const override;
+    void serialize (void* buffer) const override;
+    void destroy () override { delete this; }
+    nvinfer1::IPluginV2* clone() const override;
+
+    void setPluginNamespace (const char* pluginNamespace) override {
+        m_Namespace = pluginNamespace;
+    }
+    virtual const char* getPluginNamespace () const override {
+        return m_Namespace.c_str();
+    }
+
+private:
+    uint m_NumBoxes {0};
+    uint m_NumClasses {0};
+    uint m_GridSize {0};
+    uint64_t m_OutputSize {0};
+    std::string m_Namespace {""};
+
+    uint m_type {0};
+    uint m_new_coords {0};
+    float m_scale_x_y {0};
+    float m_beta_nms {0};
+    std::vector<float> m_Anchors;
+    std::vector<std::vector<int>> m_Mask;
+};
+
+class YoloLayerPluginCreator : public nvinfer1::IPluginCreator
+{
+public:
+    YoloLayerPluginCreator () {}
+    ~YoloLayerPluginCreator () {}
+
+    const char* getPluginName () const override { return YOLOLAYER_PLUGIN_NAME; }
+    const char* getPluginVersion () const override { return YOLOLAYER_PLUGIN_VERSION; }
+
+    const nvinfer1::PluginFieldCollection* getFieldNames() override {
+        std::cerr << "YoloLayerPluginCreator::getFieldNames is not implemented" << std::endl;
+        return nullptr;
+    }
+
+    nvinfer1::IPluginV2* createPlugin (
+        const char* name, const nvinfer1::PluginFieldCollection* fc) override
+    {
+        std::cerr << "YoloLayerPluginCreator::createPlugin is not implemented" << std::endl;
+        return nullptr;
+    }
+
+    nvinfer1::IPluginV2* deserializePlugin (
+        const char* name, const void* serialData, size_t serialLength) override
+    {
+        std::cout << "Deserialize yoloLayer plugin: " << name << std::endl;
+        return new YoloLayer(serialData, serialLength);
+    }
+
+    void setPluginNamespace(const char* libNamespace) override {
+        m_Namespace = libNamespace;
+    }
+    const char* getPluginNamespace() const override {
+        return m_Namespace.c_str();
+    }
+
+private:
+    std::string m_Namespace {""};
+};
+
+extern int kNUM_CLASSES;
+extern float kBETA_NMS;
+extern std::vector<float> kANCHORS;
+extern std::vector<std::vector<int>> kMASK;
+
+#endif // __YOLO_PLUGINS__
\ No newline at end of file
diff --git a/examples/multiple_inferences/sgie1/config_infer_secondary1.txt b/examples/multiple_inferences/sgie1/config_infer_secondary1.txt
index 0011d02..632f387 100644
--- a/examples/multiple_inferences/sgie1/config_infer_secondary1.txt
+++ b/examples/multiple_inferences/sgie1/config_infer_secondary1.txt
@@ -2,9 +2,9 @@
 gpu-id=0
 net-scale-factor=0.0039215697906911373
 model-color-format=0
-custom-network-config=yolo.cfg
+custom-network-config=sgie1/yolo.cfg
 model-file=yolo.weights
-model-engine-file=model_b1_gpu0_fp16.engine
+model-engine-file=model_b16_gpu0_fp16.engine
 labelfile-path=labels.txt
 batch-size=16
 network-mode=2
@@ -12,8 +12,6 @@ num-detected-classes=10
 interval=0
 gie-unique-id=2
 process-mode=2
-#operate-on-gie-id=1
-#operate-on-class-ids=0
 network-type=0
 cluster-mode=4
 maintain-aspect-ratio=0
diff --git a/examples/multiple_inferences/sgie1/nvdsinfer_custom_impl_Yolo/yoloPlugins.h b/examples/multiple_inferences/sgie1/nvdsinfer_custom_impl_Yolo/yoloPlugins.h
new file mode 100644
index 0000000..94812ae
--- /dev/null
+++ b/examples/multiple_inferences/sgie1/nvdsinfer_custom_impl_Yolo/yoloPlugins.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+
+ * Edited by Marcos Luciano
+ * https://www.github.com/marcoslucianops
+ */
+
+#ifndef __YOLO_PLUGINS__
+#define __YOLO_PLUGINS__
+
+#include <cassert>
+#include <cstring>
+#include <cuda_runtime_api.h>
+#include <iostream>
+#include <memory>
+
+#include <vector>
+
+#include "NvInferPlugin.h"
+
+#define CHECK(status) \
+    { \
+        if (status != 0) \
+        { \
+            std::cout << "CUDA failure: " << cudaGetErrorString(status) << " in file " << __FILE__ \
+                << " at line " << __LINE__ << std::endl; \
+            abort(); \
+        } \
+    }
+
+namespace
+{
+const char* YOLOLAYER_PLUGIN_VERSION {"2"};
+const char* YOLOLAYER_PLUGIN_NAME {"YoloLayer_TRT"};
+} // namespace
+
+class YoloLayer : public nvinfer1::IPluginV2
+{
+public:
+    YoloLayer (const void* data, size_t length);
+    YoloLayer (const uint& numBoxes, const uint& numClasses, const uint& gridSize,
+        const uint model_type, const uint new_coords, const float scale_x_y, const float beta_nms,
+        const std::vector<float> anchors, const std::vector<std::vector<int>> mask);
+    const char* getPluginType () const override { return YOLOLAYER_PLUGIN_NAME; }
+    const char* getPluginVersion () const override { return YOLOLAYER_PLUGIN_VERSION; }
+    int getNbOutputs () const override { return 1; }
+
+    nvinfer1::Dims getOutputDimensions (
+        int index, const nvinfer1::Dims* inputs,
+        int nbInputDims) override;
+
+    bool supportsFormat (
+        nvinfer1::DataType type, nvinfer1::PluginFormat format) const override;
+
+    void configureWithFormat (
+        const nvinfer1::Dims* inputDims, int nbInputs,
+        const nvinfer1::Dims* outputDims, int nbOutputs,
+        nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize) override;
+
+    int initialize () override { return 0; }
+    void terminate () override {}
+    size_t getWorkspaceSize (int maxBatchSize) const override { return 0; }
+    int enqueue (
+        int batchSize, const void* const* inputs, void** outputs,
+        void* workspace, cudaStream_t stream) override;
+    size_t getSerializationSize() const override;
+    void serialize (void* buffer) const override;
+    void destroy () override { delete this; }
+    nvinfer1::IPluginV2* clone() const override;
+
+    void setPluginNamespace (const char* pluginNamespace) override {
+        m_Namespace = pluginNamespace;
+    }
+    virtual const char* getPluginNamespace () const override {
+        return m_Namespace.c_str();
+    }
+
+private:
+    uint m_NumBoxes {0};
+    uint m_NumClasses {0};
+    uint m_GridSize {0};
+    uint64_t m_OutputSize {0};
+    std::string m_Namespace {""};
+
+    uint m_type {0};
+    uint m_new_coords {0};
+    float m_scale_x_y {0};
+    float m_beta_nms {0};
+    std::vector<float> m_Anchors;
+    std::vector<std::vector<int>> m_Mask;
+};
+
+class YoloLayerPluginCreator : public nvinfer1::IPluginCreator
+{
+public:
+    YoloLayerPluginCreator () {}
+    ~YoloLayerPluginCreator () {}
+
+    const char* getPluginName () const override { return YOLOLAYER_PLUGIN_NAME; }
+    const char* getPluginVersion () const override { return YOLOLAYER_PLUGIN_VERSION; }
+
+    const nvinfer1::PluginFieldCollection* getFieldNames() override {
+        std::cerr << "YoloLayerPluginCreator::getFieldNames is not implemented" << std::endl;
+        return nullptr;
+    }
+
+    nvinfer1::IPluginV2* createPlugin (
+        const char* name, const nvinfer1::PluginFieldCollection* fc) override
+    {
+        std::cerr << "YoloLayerPluginCreator::createPlugin is not implemented" << std::endl;
+        return nullptr;
+    }
+
+    nvinfer1::IPluginV2* deserializePlugin (
+        const char* name, const void* serialData, size_t serialLength) override
+    {
+        std::cout << "Deserialize yoloLayer plugin: " << name << std::endl;
+        return new YoloLayer(serialData, serialLength);
+    }
+
+    void setPluginNamespace(const char* libNamespace) override {
+        m_Namespace = libNamespace;
+    }
+    const char* getPluginNamespace() const override {
+        return m_Namespace.c_str();
+    }
+
+private:
+    std::string m_Namespace {""};
+};
+
+extern int kNUM_CLASSES;
+extern float kBETA_NMS;
+extern std::vector<float> kANCHORS;
+extern std::vector<std::vector<int>> kMASK;
+
+#endif // __YOLO_PLUGINS__
\ No newline at end of file
diff --git a/multipleInferences.md b/multipleInferences.md
index d4bb4c0..c4b62fe 100644
--- a/multipleInferences.md
+++ b/multipleInferences.md
@@ -27,6 +27,20 @@ CFLAGS+= -I../../../includes -I/usr/local/cuda-$(CUDA_VER)/include
 
 ##
 
+### Editing yoloPlugins.h
+To run deepstream-app without errors, it is necessary to edit yoloPlugins.h (line 51), in the nvdsinfer_custom_impl_Yolo folder of each secondary inference directory.
+```
+const char* YOLOLAYER_PLUGIN_VERSION {"1"};
+```
+To:
+```
+const char* YOLOLAYER_PLUGIN_VERSION {"2"};
+```
+
+Note: 2 = sgie1, 3 = sgie2, 4 = sgie3, etc.
+
+##
+
 ### Compiling edited models
 1. Check your CUDA version (nvcc --version)
 2. Go to inference directory.
@@ -47,7 +61,6 @@ Example for 1 secondary-gie (2 inferences):
 enable=1
 gpu-id=0
 gie-unique-id=2
-# If you want secodary inference operate on specified GIE id (gie-unique-id you want to operate: 1, 2, etc; comment it if you don't want to use)
 operate-on-gie-id=1
 # If you want secodary inference operate on specified class ids of GIE (class ids you want to operate: 1, 1;2, 2;3;4, 3 etc; comment it if you don't want to use)
 operate-on-class-ids=0
@@ -161,22 +174,6 @@ Example for all secondary:
 batch-size=16
 ```
 
-##
-
-* If you want secodary inference operate on specified GIE id (gie-unique-id you want to operate: 1, 2, etc.)
-
-```
-operate-on-gie-id=1
-```
-
-##
-
-* If you want secodary inference operate on specified class ids of GIE (class ids you want to operate: 1, 1;2, 2;3;4, 3 etc.)
-
-```
-operate-on-class-ids=0
-```
-
 ### Testing model
 
 To run your custom YOLO model, use this command
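
For reference, a minimal sketch of how the patched example might be rebuilt and run. The CUDA_VER value, the working directory, and the per-folder Makefile invocation are assumptions based on the paths and the CFLAGS line shown in this patch; adjust them to your setup.

```sh
# Assumed workflow; paths follow the repository layout in this patch.
# Check the CUDA toolkit version used by the plugin Makefiles (CUDA_VER).
nvcc --version

# Rebuild the custom YOLO plugin library for each inference directory.
# Each secondary copy of yoloPlugins.h must use its own YOLOLAYER_PLUGIN_VERSION
# ("1" for pgie, "2" for sgie1, and so on) before compiling.
cd examples/multiple_inferences
CUDA_VER=10.2 make -C pgie/nvdsinfer_custom_impl_Yolo    # CUDA_VER=10.2 is an assumption
CUDA_VER=10.2 make -C sgie1/nvdsinfer_custom_impl_Yolo

# Run the pipeline from this directory so that relative paths such as
# pgie/yolo.cfg and sgie1/config_infer_secondary1.txt resolve correctly.
deepstream-app -c deepstream_app_config.txt
```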