Added native YOLOR support
Supported models: YOLOR-CSP, YOLOR-CSP*, YOLOR-CSP-X, YOLOR-CSP-X*
This commit is contained in:
@@ -53,6 +53,8 @@ SRCFILES:= nvdsinfer_yolo_engine.cpp \
|
||||
nvdsparsebbox_Yolo.cpp \
|
||||
yoloPlugins.cpp \
|
||||
layers/convolutional_layer.cpp \
|
||||
layers/implicit_layer.cpp \
|
||||
layers/channels_layer.cpp \
|
||||
layers/dropout_layer.cpp \
|
||||
layers/shortcut_layer.cpp \
|
||||
layers/route_layer.cpp \
|
||||
|
||||
32
nvdsinfer_custom_impl_Yolo/layers/channels_layer.cpp
Normal file
32
nvdsinfer_custom_impl_Yolo/layers/channels_layer.cpp
Normal file
@@ -0,0 +1,32 @@
|
||||
/*
|
||||
* Created by Marcos Luciano
|
||||
* https://www.github.com/marcoslucianops
|
||||
*/
|
||||
|
||||
#include "channels_layer.h"
|
||||
|
||||
/**
 * Appends a YOLOR implicit-knowledge channel operation to the network.
 *
 * @param type            "shift"   -> element-wise SUM  (shift_channels)
 *                        "control" -> element-wise PROD (control_channels)
 * @param input           feature-map tensor to combine
 * @param implicitTensor  learned per-channel implicit tensor (Cx1x1, broadcast
 *                        over the spatial dimensions by TensorRT)
 * @param network         network definition the layer is added to
 * @return the element-wise layer wrapping the operation
 *
 * Bug fix: `output` was previously left uninitialized, so an unrecognized
 * `type` returned an indeterminate pointer (undefined behavior). It is now
 * nullptr-initialized and asserted before returning.
 */
nvinfer1::ILayer* channelsLayer(
    std::string type,
    nvinfer1::ITensor* input,
    nvinfer1::ITensor* implicitTensor,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ILayer* output = nullptr;

    if (type == "shift") {
        // shift_channels: additive implicit knowledge.
        nvinfer1::IElementWiseLayer* ew = network->addElementWise(
            *input, *implicitTensor,
            nvinfer1::ElementWiseOperation::kSUM);
        assert(ew != nullptr);
        output = ew;
    }
    else if (type == "control") {
        // control_channels: multiplicative implicit knowledge.
        nvinfer1::IElementWiseLayer* ew = network->addElementWise(
            *input, *implicitTensor,
            nvinfer1::ElementWiseOperation::kPROD);
        assert(ew != nullptr);
        output = ew;
    }

    // Fail loudly on an unsupported type instead of returning garbage.
    assert(output != nullptr);
    return output;
}
|
||||
20
nvdsinfer_custom_impl_Yolo/layers/channels_layer.h
Normal file
20
nvdsinfer_custom_impl_Yolo/layers/channels_layer.h
Normal file
@@ -0,0 +1,20 @@
|
||||
/*
|
||||
* Created by Marcos Luciano
|
||||
* https://www.github.com/marcoslucianops
|
||||
*/
|
||||
|
||||
#ifndef __CHANNELS_LAYER_H__
#define __CHANNELS_LAYER_H__

#include <map>
#include <string>   // include-what-you-use: std::string appears in the declaration below
#include <cassert>

#include "NvInfer.h"

/**
 * Builds the YOLOR shift_channels / control_channels operation: combines
 * `input` with the learned implicit tensor via element-wise SUM ("shift")
 * or PROD ("control") and returns the layer added to `network`.
 */
nvinfer1::ILayer* channelsLayer(
    std::string type,
    nvinfer1::ITensor* input,
    nvinfer1::ITensor* implicitTensor,
    nvinfer1::INetworkDefinition* network);

#endif
|
||||
31
nvdsinfer_custom_impl_Yolo/layers/implicit_layer.cpp
Normal file
31
nvdsinfer_custom_impl_Yolo/layers/implicit_layer.cpp
Normal file
@@ -0,0 +1,31 @@
|
||||
/*
|
||||
* Created by Marcos Luciano
|
||||
* https://www.github.com/marcoslucianops
|
||||
*/
|
||||
|
||||
#include <math.h>
|
||||
#include "implicit_layer.h"
|
||||
|
||||
/**
 * Creates a YOLOR implicit-knowledge tensor (implicit_add / implicit_mul)
 * as a constant layer of shape C x 1 x 1.
 *
 * @param channels    number of channels (one learned scalar per channel)
 * @param weights     flat weight file contents
 * @param trtWeights  collects every nvinfer1::Weights buffer so it outlives
 *                    engine building; takes ownership of the copy made here
 * @param weightPtr   read cursor into `weights`; advanced by `channels`
 * @param network     network definition the constant layer is added to
 * @return the constant layer holding the implicit tensor
 */
nvinfer1::ILayer* implicitLayer(
    int channels,
    std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights,
    int& weightPtr,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::Weights convWt{nvinfer1::DataType::kFLOAT, nullptr, channels};

    // Copy the next `channels` floats out of the weight file. The buffer is
    // heap-allocated on purpose: TensorRT keeps a pointer to it until the
    // engine is built, so ownership is handed to `trtWeights`.
    float* values = new float[channels];
    for (int idx = 0; idx < channels; ++idx) {
        values[idx] = weights[weightPtr++];
    }
    convWt.values = values;
    trtWeights.push_back(convWt);

    // C x 1 x 1 lets TensorRT broadcast the scalars over H and W.
    nvinfer1::IConstantLayer* implicit
        = network->addConstant(nvinfer1::Dims3{static_cast<int>(channels), 1, 1}, convWt);
    assert(implicit != nullptr);

    return implicit;
}
|
||||
22
nvdsinfer_custom_impl_Yolo/layers/implicit_layer.h
Normal file
22
nvdsinfer_custom_impl_Yolo/layers/implicit_layer.h
Normal file
@@ -0,0 +1,22 @@
|
||||
/*
|
||||
* Created by Marcos Luciano
|
||||
* https://www.github.com/marcoslucianops
|
||||
*/
|
||||
|
||||
#ifndef __IMPLICIT_LAYER_H__
#define __IMPLICIT_LAYER_H__

#include <map>
#include <vector>
#include <cassert>

#include "NvInfer.h"

// Creates a YOLOR implicit-knowledge tensor (implicit_add / implicit_mul) as
// a Cx1x1 constant layer. Reads `channels` floats from `weights` starting at
// `weightPtr` (advancing it), records the copied buffer in `trtWeights` so it
// survives until the engine is built, and returns the layer added to `network`.
nvinfer1::ILayer* implicitLayer(
    int channels,
    std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights,
    int& weightPtr,
    nvinfer1::INetworkDefinition* network);

#endif
|
||||
@@ -187,6 +187,51 @@ NvDsInferStatus Yolo::buildYoloNetwork(
|
||||
printLayerInfo(layerIndex, layerType, inputVol, outputVol, std::to_string(weightPtr));
|
||||
}
|
||||
|
||||
else if (m_ConfigBlocks.at(i).at("type") == "implicit_add" || m_ConfigBlocks.at(i).at("type") == "implicit_mul") {
|
||||
std::string type;
|
||||
if (m_ConfigBlocks.at(i).at("type") == "implicit_add") {
|
||||
type = "add";
|
||||
}
|
||||
else if (m_ConfigBlocks.at(i).at("type") == "implicit_mul") {
|
||||
type = "mul";
|
||||
}
|
||||
assert(m_ConfigBlocks.at(i).find("filters") != m_ConfigBlocks.at(i).end());
|
||||
int filters = std::stoi(m_ConfigBlocks.at(i).at("filters"));
|
||||
nvinfer1::ILayer* out = implicitLayer(filters, weights, m_TrtWeights, weightPtr, &network);
|
||||
previous = out->getOutput(0);
|
||||
assert(previous != nullptr);
|
||||
channels = getNumChannels(previous);
|
||||
std::string outputVol = dimsToString(previous->getDimensions());
|
||||
tensorOutputs.push_back(previous);
|
||||
std::string layerType = "implicit_" + type;
|
||||
printLayerInfo(layerIndex, layerType, " -", outputVol, std::to_string(weightPtr));
|
||||
}
|
||||
|
||||
else if (m_ConfigBlocks.at(i).at("type") == "shift_channels" || m_ConfigBlocks.at(i).at("type") == "control_channels") {
|
||||
std::string type;
|
||||
if (m_ConfigBlocks.at(i).at("type") == "shift_channels") {
|
||||
type = "shift";
|
||||
}
|
||||
else if (m_ConfigBlocks.at(i).at("type") == "control_channels") {
|
||||
type = "control";
|
||||
}
|
||||
assert(m_ConfigBlocks.at(i).find("from") != m_ConfigBlocks.at(i).end());
|
||||
int from = stoi(m_ConfigBlocks.at(i).at("from"));
|
||||
if (from > 0) {
|
||||
from = from - i + 1;
|
||||
}
|
||||
assert((i - 2 >= 0) && (i - 2 < tensorOutputs.size()));
|
||||
assert((i + from - 1 >= 0) && (i + from - 1 < tensorOutputs.size()));
|
||||
assert(i + from - 1 < i - 2);
|
||||
nvinfer1::ILayer* out = channelsLayer(type, previous, tensorOutputs[i + from - 1], &network);
|
||||
previous = out->getOutput(0);
|
||||
assert(previous != nullptr);
|
||||
std::string outputVol = dimsToString(previous->getDimensions());
|
||||
tensorOutputs.push_back(previous);
|
||||
std::string layerType = type + "_channels" + ": " + std::to_string(i + from - 1);
|
||||
printLayerInfo(layerIndex, layerType, " -", outputVol, " -");
|
||||
}
|
||||
|
||||
else if (m_ConfigBlocks.at(i).at("type") == "dropout") {
|
||||
assert(m_ConfigBlocks.at(i).find("probability") != m_ConfigBlocks.at(i).end());
|
||||
//float probability = std::stof(m_ConfigBlocks.at(i).at("probability"));
|
||||
|
||||
@@ -27,6 +27,8 @@
|
||||
#define _YOLO_H_
|
||||
|
||||
#include "layers/convolutional_layer.h"
|
||||
#include "layers/implicit_layer.h"
|
||||
#include "layers/channels_layer.h"
|
||||
#include "layers/dropout_layer.h"
|
||||
#include "layers/shortcut_layer.h"
|
||||
#include "layers/route_layer.h"
|
||||
|
||||
@@ -60,6 +60,28 @@ __global__ void gpuYoloLayer(const float* input, float* output, const uint gridS
|
||||
= input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))];
|
||||
}
|
||||
}
|
||||
else if (new_coords == 0 && scale_x_y != 1) { // YOLOR incorrect param
|
||||
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]
|
||||
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]) * 2.0 - 0.5;
|
||||
|
||||
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]
|
||||
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]) * 2.0 - 0.5;
|
||||
|
||||
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]
|
||||
= pow(sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]) * 2, 2);
|
||||
|
||||
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]
|
||||
= pow(sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]) * 2, 2);
|
||||
|
||||
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]
|
||||
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]);
|
||||
|
||||
for (uint i = 0; i < numOutputClasses; ++i)
|
||||
{
|
||||
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]
|
||||
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]);
|
||||
}
|
||||
}
|
||||
else {
|
||||
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]
|
||||
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]) * alpha + beta;
|
||||
|
||||
Reference in New Issue
Block a user