Added YOLOR native support

YOLOR-CSP
YOLOR-CSP*
YOLOR-CSP-X
YOLOR-CSP-X*
This commit is contained in:
unknown
2021-12-12 00:47:32 -03:00
parent 7761ca7a6b
commit e2257a81c0
12 changed files with 336 additions and 6 deletions

View File

@@ -53,6 +53,8 @@ SRCFILES:= nvdsinfer_yolo_engine.cpp \
nvdsparsebbox_Yolo.cpp \
yoloPlugins.cpp \
layers/convolutional_layer.cpp \
layers/implicit_layer.cpp \
layers/channels_layer.cpp \
layers/dropout_layer.cpp \
layers/shortcut_layer.cpp \
layers/route_layer.cpp \

View File

@@ -0,0 +1,32 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include "channels_layer.h"
// Combines `input` element-wise with an implicit knowledge tensor:
//   type == "shift"   -> addition (kSUM), used by YOLOR shift_channels
//   type == "control" -> multiplication (kPROD), used by YOLOR control_channels
// Returns the created TensorRT layer, or nullptr if `type` is unrecognized.
nvinfer1::ILayer* channelsLayer(
    std::string type,
    nvinfer1::ITensor* input,
    nvinfer1::ITensor* implicitTensor,
    nvinfer1::INetworkDefinition* network)
{
    // Initialize to nullptr: previously an unknown `type` returned an
    // uninitialized pointer (undefined behavior at the caller).
    nvinfer1::ILayer* output = nullptr;
    if (type == "shift") {
        nvinfer1::IElementWiseLayer* ew = network->addElementWise(
            *input, *implicitTensor,
            nvinfer1::ElementWiseOperation::kSUM);
        assert(ew != nullptr);
        output = ew;
    }
    else if (type == "control") {
        nvinfer1::IElementWiseLayer* ew = network->addElementWise(
            *input, *implicitTensor,
            nvinfer1::ElementWiseOperation::kPROD);
        assert(ew != nullptr);
        output = ew;
    }
    else {
        // Fail loudly in debug builds instead of silently returning nullptr.
        assert(false && "channelsLayer: type must be \"shift\" or \"control\"");
    }
    return output;
}

View File

@@ -0,0 +1,20 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#ifndef __CHANNELS_LAYER_H__
#define __CHANNELS_LAYER_H__

#include <map>
#include <string>
#include <cassert>

#include "NvInfer.h"

// Builds an element-wise combination of `input` with `implicitTensor`:
// type "shift" maps to kSUM, type "control" maps to kPROD (see
// channels_layer.cpp). Returns the created layer; nullptr on unknown type.
// NOTE: <string> is now included directly — the declaration uses
// std::string and previously relied on a transitive include.
nvinfer1::ILayer* channelsLayer(
    std::string type,
    nvinfer1::ITensor* input,
    nvinfer1::ITensor* implicitTensor,
    nvinfer1::INetworkDefinition* network);

#endif

View File

@@ -0,0 +1,31 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include <math.h>
#include "implicit_layer.h"
// Creates a TensorRT constant layer holding a YOLOR implicit knowledge
// tensor of shape (channels, 1, 1). Consumes `channels` floats from
// `weights` starting at `weightPtr`, advancing `weightPtr` past them.
// The nvinfer1::Weights wrapper is appended to `trtWeights` so the caller
// retains ownership of the heap-allocated value buffer and can free it
// after engine construction.
nvinfer1::ILayer* implicitLayer(
    int channels,
    std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights,
    int& weightPtr,
    nvinfer1::INetworkDefinition* network)
{
    // Copy the raw values into a buffer that outlives this call; TensorRT
    // keeps only a pointer, so the storage must stay valid.
    float* values = new float[channels];
    for (int idx = 0; idx < channels; ++idx) {
        values[idx] = weights[weightPtr++];
    }

    nvinfer1::Weights constWt{nvinfer1::DataType::kFLOAT, values, channels};
    trtWeights.push_back(constWt);

    nvinfer1::IConstantLayer* implicit
        = network->addConstant(nvinfer1::Dims3{channels, 1, 1}, constWt);
    assert(implicit != nullptr);
    return implicit;
}

View File

@@ -0,0 +1,22 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#ifndef __IMPLICIT_LAYER_H__
#define __IMPLICIT_LAYER_H__
#include <map>
#include <vector>
#include <cassert>
#include "NvInfer.h"
// Creates a constant layer of shape (channels, 1, 1) whose values are read
// from `weights` starting at `weightPtr`; `weightPtr` is advanced by
// `channels`. The nvinfer1::Weights wrapper is appended to `trtWeights`
// so the caller keeps ownership of the underlying value buffer
// (presumably freed after engine build — confirm against caller).
nvinfer1::ILayer* implicitLayer(
    int channels,
    std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights,
    int& weightPtr,
    nvinfer1::INetworkDefinition* network);
#endif

View File

@@ -187,6 +187,51 @@ NvDsInferStatus Yolo::buildYoloNetwork(
printLayerInfo(layerIndex, layerType, inputVol, outputVol, std::to_string(weightPtr));
}
else if (m_ConfigBlocks.at(i).at("type") == "implicit_add" || m_ConfigBlocks.at(i).at("type") == "implicit_mul") {
std::string type;
if (m_ConfigBlocks.at(i).at("type") == "implicit_add") {
type = "add";
}
else if (m_ConfigBlocks.at(i).at("type") == "implicit_mul") {
type = "mul";
}
assert(m_ConfigBlocks.at(i).find("filters") != m_ConfigBlocks.at(i).end());
int filters = std::stoi(m_ConfigBlocks.at(i).at("filters"));
nvinfer1::ILayer* out = implicitLayer(filters, weights, m_TrtWeights, weightPtr, &network);
previous = out->getOutput(0);
assert(previous != nullptr);
channels = getNumChannels(previous);
std::string outputVol = dimsToString(previous->getDimensions());
tensorOutputs.push_back(previous);
std::string layerType = "implicit_" + type;
printLayerInfo(layerIndex, layerType, " -", outputVol, std::to_string(weightPtr));
}
else if (m_ConfigBlocks.at(i).at("type") == "shift_channels" || m_ConfigBlocks.at(i).at("type") == "control_channels") {
std::string type;
if (m_ConfigBlocks.at(i).at("type") == "shift_channels") {
type = "shift";
}
else if (m_ConfigBlocks.at(i).at("type") == "control_channels") {
type = "control";
}
assert(m_ConfigBlocks.at(i).find("from") != m_ConfigBlocks.at(i).end());
int from = stoi(m_ConfigBlocks.at(i).at("from"));
if (from > 0) {
from = from - i + 1;
}
assert((i - 2 >= 0) && (i - 2 < tensorOutputs.size()));
assert((i + from - 1 >= 0) && (i + from - 1 < tensorOutputs.size()));
assert(i + from - 1 < i - 2);
nvinfer1::ILayer* out = channelsLayer(type, previous, tensorOutputs[i + from - 1], &network);
previous = out->getOutput(0);
assert(previous != nullptr);
std::string outputVol = dimsToString(previous->getDimensions());
tensorOutputs.push_back(previous);
std::string layerType = type + "_channels" + ": " + std::to_string(i + from - 1);
printLayerInfo(layerIndex, layerType, " -", outputVol, " -");
}
else if (m_ConfigBlocks.at(i).at("type") == "dropout") {
assert(m_ConfigBlocks.at(i).find("probability") != m_ConfigBlocks.at(i).end());
//float probability = std::stof(m_ConfigBlocks.at(i).at("probability"));

View File

@@ -27,6 +27,8 @@
#define _YOLO_H_
#include "layers/convolutional_layer.h"
#include "layers/implicit_layer.h"
#include "layers/channels_layer.h"
#include "layers/dropout_layer.h"
#include "layers/shortcut_layer.h"
#include "layers/route_layer.h"

View File

@@ -60,6 +60,28 @@ __global__ void gpuYoloLayer(const float* input, float* output, const uint gridS
= input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))];
}
}
else if (new_coords == 0 && scale_x_y != 1) { // YOLOR incorrect param
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]) * 2.0 - 0.5;
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]) * 2.0 - 0.5;
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]
= pow(sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]) * 2, 2);
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]
= pow(sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]) * 2, 2);
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]);
for (uint i = 0; i < numOutputClasses; ++i)
{
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]);
}
}
else {
output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]
= sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]) * alpha + beta;