DeepStream 6.0 update
nvdsinfer_custom_impl_Yolo/layers/activation_layer.cpp (new file, 82 lines)
@@ -0,0 +1,82 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "activation_layer.h"

nvinfer1::ILayer* activationLayer(
    int layerIdx,
    std::string activation,
    nvinfer1::ILayer* output,
    nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    if (activation == "relu")
    {
        nvinfer1::IActivationLayer* relu = network->addActivation(
            *input, nvinfer1::ActivationType::kRELU);
        assert(relu != nullptr);
        std::string reluLayerName = "relu_" + std::to_string(layerIdx);
        relu->setName(reluLayerName.c_str());
        output = relu;
    }
    else if (activation == "sigmoid" || activation == "logistic")
    {
        nvinfer1::IActivationLayer* sigmoid = network->addActivation(
            *input, nvinfer1::ActivationType::kSIGMOID);
        assert(sigmoid != nullptr);
        std::string sigmoidLayerName = "sigmoid_" + std::to_string(layerIdx);
        sigmoid->setName(sigmoidLayerName.c_str());
        output = sigmoid;
    }
    else if (activation == "tanh")
    {
        nvinfer1::IActivationLayer* tanh = network->addActivation(
            *input, nvinfer1::ActivationType::kTANH);
        assert(tanh != nullptr);
        std::string tanhLayerName = "tanh_" + std::to_string(layerIdx);
        tanh->setName(tanhLayerName.c_str());
        output = tanh;
    }
    else if (activation == "leaky")
    {
        nvinfer1::IActivationLayer* leaky = network->addActivation(
            *input, nvinfer1::ActivationType::kLEAKY_RELU);
        assert(leaky != nullptr);
        leaky->setAlpha(0.1);
        std::string leakyLayerName = "leaky_" + std::to_string(layerIdx);
        leaky->setName(leakyLayerName.c_str());
        output = leaky;
    }
    else if (activation == "softplus")
    {
        nvinfer1::IActivationLayer* softplus = network->addActivation(
            *input, nvinfer1::ActivationType::kSOFTPLUS);
        assert(softplus != nullptr);
        std::string softplusLayerName = "softplus_" + std::to_string(layerIdx);
        softplus->setName(softplusLayerName.c_str());
        output = softplus;
    }
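    // mish(x) = x * tanh(softplus(x)): built below from a softplus
    // activation, a tanh activation, and an element-wise product with x.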
    else if (activation == "mish")
    {
        nvinfer1::IActivationLayer* softplus = network->addActivation(
            *input, nvinfer1::ActivationType::kSOFTPLUS);
        assert(softplus != nullptr);
        std::string softplusLayerName = "softplus_" + std::to_string(layerIdx);
        softplus->setName(softplusLayerName.c_str());
        nvinfer1::IActivationLayer* tanh = network->addActivation(
            *softplus->getOutput(0), nvinfer1::ActivationType::kTANH);
        assert(tanh != nullptr);
        std::string tanhLayerName = "tanh_" + std::to_string(layerIdx);
        tanh->setName(tanhLayerName.c_str());
        nvinfer1::IElementWiseLayer* mish = network->addElementWise(
            *tanh->getOutput(0), *input,
            nvinfer1::ElementWiseOperation::kPROD);
        assert(mish != nullptr);
        std::string mishLayerName = "mish_" + std::to_string(layerIdx);
        mish->setName(mishLayerName.c_str());
        output = mish;
    }
    return output;
}
nvdsinfer_custom_impl_Yolo/layers/activation_layer.h (new file, 23 lines)
@@ -0,0 +1,23 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __ACTIVATION_LAYER_H__
#define __ACTIVATION_LAYER_H__

#include <string>
#include <cassert>

#include "NvInfer.h"

nvinfer1::ILayer* activationLayer(
    int layerIdx,
    std::string activation,
    nvinfer1::ILayer* output,
    nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif
nvdsinfer_custom_impl_Yolo/layers/convolutional_layer.cpp (new file, 168 lines)
@@ -0,0 +1,168 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include <math.h>
#include "convolutional_layer.h"

nvinfer1::ILayer* convolutionalLayer(
    int layerIdx,
    std::map<std::string, std::string>& block,
    std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights,
    int& weightPtr,
    int& inputChannels,
    nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    assert(block.at("type") == "convolutional");
    assert(block.find("filters") != block.end());
    assert(block.find("pad") != block.end());
    assert(block.find("size") != block.end());
    assert(block.find("stride") != block.end());

    int filters = std::stoi(block.at("filters"));
    int padding = std::stoi(block.at("pad"));
    int kernelSize = std::stoi(block.at("size"));
    int stride = std::stoi(block.at("stride"));
    std::string activation = block.at("activation");
    int bias = filters;

    bool batchNormalize = false;
    if (block.find("batch_normalize") != block.end())
    {
        batchNormalize = (block.at("batch_normalize") == "1");
        // The convolution only drops its bias when batch norm is actually
        // enabled, not merely when the key is present.
        if (batchNormalize)
            bias = 0;
    }
    int groups = 1;
    if (block.find("groups") != block.end())
    {
        groups = std::stoi(block.at("groups"));
    }

    int pad;
    if (padding)
        pad = (kernelSize - 1) / 2;
    else
        pad = 0;

    int size = filters * inputChannels * kernelSize * kernelSize / groups;
    std::vector<float> bnBiases;
    std::vector<float> bnWeights;
    std::vector<float> bnRunningMean;
    std::vector<float> bnRunningVar;
    nvinfer1::Weights convWt{nvinfer1::DataType::kFLOAT, nullptr, size};
    nvinfer1::Weights convBias{nvinfer1::DataType::kFLOAT, nullptr, bias};
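
    // Darknet stores a convolutional block's weights as [biases, kernel
    // weights] when there is no batch norm, and as [bn_biases, bn_scales,
    // bn_running_mean, bn_running_var, kernel weights] when there is.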
    if (batchNormalize == false)
    {
        float* val = new float[filters];
        for (int i = 0; i < filters; ++i)
        {
            val[i] = weights[weightPtr];
            weightPtr++;
        }
        convBias.values = val;
        trtWeights.push_back(convBias);
        val = new float[size];
        for (int i = 0; i < size; ++i)
        {
            val[i] = weights[weightPtr];
            weightPtr++;
        }
        convWt.values = val;
        trtWeights.push_back(convWt);
    }
    else
    {
        for (int i = 0; i < filters; ++i)
        {
            bnBiases.push_back(weights[weightPtr]);
            weightPtr++;
        }
        for (int i = 0; i < filters; ++i)
        {
            bnWeights.push_back(weights[weightPtr]);
            weightPtr++;
        }
        for (int i = 0; i < filters; ++i)
        {
            bnRunningMean.push_back(weights[weightPtr]);
            weightPtr++;
        }
        for (int i = 0; i < filters; ++i)
        {
            bnRunningVar.push_back(sqrt(weights[weightPtr] + 1.0e-5));
            weightPtr++;
        }
        float* val = new float[size];
        for (int i = 0; i < size; ++i)
        {
            val[i] = weights[weightPtr];
            weightPtr++;
        }
        convWt.values = val;
        trtWeights.push_back(convWt);
        trtWeights.push_back(convBias);
    }

    nvinfer1::IConvolutionLayer* conv = network->addConvolution(
        *input, filters, nvinfer1::DimsHW{kernelSize, kernelSize}, convWt, convBias);
    assert(conv != nullptr);
    std::string convLayerName = "conv_" + std::to_string(layerIdx);
    conv->setName(convLayerName.c_str());
    conv->setStride(nvinfer1::DimsHW{stride, stride});
    conv->setPadding(nvinfer1::DimsHW{pad, pad});

    if (block.find("groups") != block.end())
    {
        conv->setNbGroups(groups);
    }

    nvinfer1::ILayer* output = conv;
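
    // Batch norm folds into a per-channel affine transform:
    // y = gamma * (x - mean) / sqrt(var + eps) + beta, realized here as an
    // IScaleLayer with scale = gamma / sqrt(var + eps),
    // shift = beta - mean * scale, and power = 1 (bnRunningVar already holds
    // sqrt(var + eps) from the loading loop above).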
    if (batchNormalize == true)
    {
        size = filters;
        nvinfer1::Weights shift{nvinfer1::DataType::kFLOAT, nullptr, size};
        nvinfer1::Weights scale{nvinfer1::DataType::kFLOAT, nullptr, size};
        nvinfer1::Weights power{nvinfer1::DataType::kFLOAT, nullptr, size};
        float* shiftWt = new float[size];
        for (int i = 0; i < size; ++i)
        {
            shiftWt[i]
                = bnBiases.at(i) - ((bnRunningMean.at(i) * bnWeights.at(i)) / bnRunningVar.at(i));
        }
        shift.values = shiftWt;
        float* scaleWt = new float[size];
        for (int i = 0; i < size; ++i)
        {
            scaleWt[i] = bnWeights.at(i) / bnRunningVar.at(i);
        }
        scale.values = scaleWt;
        float* powerWt = new float[size];
        for (int i = 0; i < size; ++i)
        {
            powerWt[i] = 1.0;
        }
        power.values = powerWt;
        trtWeights.push_back(shift);
        trtWeights.push_back(scale);
        trtWeights.push_back(power);

        nvinfer1::IScaleLayer* bn = network->addScale(
            *output->getOutput(0), nvinfer1::ScaleMode::kCHANNEL, shift, scale, power);
        assert(bn != nullptr);
        std::string bnLayerName = "batch_norm_" + std::to_string(layerIdx);
        bn->setName(bnLayerName.c_str());
        output = bn;
    }

    output = activationLayer(layerIdx, activation, output, output->getOutput(0), network);
    assert(output != nullptr);

    return output;
}
nvdsinfer_custom_impl_Yolo/layers/convolutional_layer.h (new file, 26 lines)
@@ -0,0 +1,26 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __CONVOLUTIONAL_LAYER_H__
#define __CONVOLUTIONAL_LAYER_H__

#include <map>
#include <vector>

#include "NvInfer.h"

#include "activation_layer.h"

nvinfer1::ILayer* convolutionalLayer(
    int layerIdx,
    std::map<std::string, std::string>& block,
    std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights,
    int& weightPtr,
    int& inputChannels,
    nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif
nvdsinfer_custom_impl_Yolo/layers/dropout_layer.cpp (new file, 15 lines)
@@ -0,0 +1,15 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include <cassert>

#include "dropout_layer.h"

nvinfer1::ILayer* dropoutLayer(
    float probability,
    nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    // Dropout only has an effect during training; at inference it is a
    // no-op, so forward the input unchanged instead of returning an
    // uninitialized pointer.
    (void) probability;
    nvinfer1::IIdentityLayer* output = network->addIdentity(*input);
    assert(output != nullptr);
    return output;
}
nvdsinfer_custom_impl_Yolo/layers/dropout_layer.h (new file, 16 lines)
@@ -0,0 +1,16 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __DROPOUT_LAYER_H__
#define __DROPOUT_LAYER_H__

#include "NvInfer.h"

nvinfer1::ILayer* dropoutLayer(
    float probability,
    nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif
nvdsinfer_custom_impl_Yolo/layers/maxpool_layer.cpp (new file, 30 lines)
@@ -0,0 +1,30 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "maxpool_layer.h"

nvinfer1::ILayer* maxpoolLayer(
    int layerIdx,
    std::map<std::string, std::string>& block,
    nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    assert(block.at("type") == "maxpool");
    assert(block.find("size") != block.end());
    assert(block.find("stride") != block.end());

    int size = std::stoi(block.at("size"));
    int stride = std::stoi(block.at("stride"));
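
    // kSAME_UPPER pads on demand so the output size is ceil(input / stride),
    // which is intended to match darknet's default maxpool padding.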
    nvinfer1::IPoolingLayer* pool
        = network->addPooling(*input, nvinfer1::PoolingType::kMAX, nvinfer1::DimsHW{size, size});
    assert(pool != nullptr);
    std::string maxpoolLayerName = "maxpool_" + std::to_string(layerIdx);
    pool->setStride(nvinfer1::DimsHW{stride, stride});
    pool->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
    pool->setName(maxpoolLayerName.c_str());

    return pool;
}
nvdsinfer_custom_impl_Yolo/layers/maxpool_layer.h (new file, 20 lines)
@@ -0,0 +1,20 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __MAXPOOL_LAYER_H__
#define __MAXPOOL_LAYER_H__

#include <map>
#include <cassert>

#include "NvInfer.h"

nvinfer1::ILayer* maxpoolLayer(
    int layerIdx,
    std::map<std::string, std::string>& block,
    nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif
nvdsinfer_custom_impl_Yolo/layers/route_layer.cpp (new file, 63 lines)
@@ -0,0 +1,63 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "route_layer.h"

nvinfer1::ILayer* routeLayer(
    int layerIdx,
    std::map<std::string, std::string>& block,
    std::vector<nvinfer1::ITensor*> tensorOutputs,
    nvinfer1::INetworkDefinition* network)
{
    std::string strLayers = block.at("layers");
    std::vector<int> idxLayers;
    size_t lastPos = 0, pos = 0;
    while ((pos = strLayers.find(',', lastPos)) != std::string::npos) {
        int vL = std::stoi(trim(strLayers.substr(lastPos, pos - lastPos)));
        idxLayers.push_back(vL);
        lastPos = pos + 1;
    }
    if (lastPos < strLayers.length()) {
        std::string lastV = trim(strLayers.substr(lastPos));
        if (!lastV.empty()) {
            idxLayers.push_back(std::stoi(lastV));
        }
    }
    assert(!idxLayers.empty());
    std::vector<nvinfer1::ITensor*> concatInputs;
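    // Darknet route indices may be negative, in which case they are offsets
    // relative to the current layer (e.g. -1 is the previous layer's output).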
    for (int idxLayer : idxLayers) {
        if (idxLayer < 0) {
            idxLayer = tensorOutputs.size() + idxLayer;
        }
        assert(idxLayer >= 0 && idxLayer < (int)tensorOutputs.size());
        concatInputs.push_back(tensorOutputs[idxLayer]);
    }

    nvinfer1::IConcatenationLayer* concat =
        network->addConcatenation(concatInputs.data(), concatInputs.size());
    assert(concat != nullptr);
    std::string concatLayerName = "route_" + std::to_string(layerIdx - 1);
    concat->setName(concatLayerName.c_str());
    concat->setAxis(0);

    nvinfer1::ILayer* output = concat;
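
    // CSP-style routes split the concatenated tensor channel-wise into
    // `groups` equal parts and keep only the slice selected by `group_id`.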
    if (block.find("groups") != block.end()) {
        nvinfer1::Dims prevTensorDims = output->getOutput(0)->getDimensions();
        int groups = std::stoi(block.at("groups"));
        int group_id = std::stoi(block.at("group_id"));
        int startSlice = (prevTensorDims.d[0] / groups) * group_id;
        int channelSlice = (prevTensorDims.d[0] / groups);
        nvinfer1::ISliceLayer* sl = network->addSlice(
            *output->getOutput(0),
            nvinfer1::Dims3{startSlice, 0, 0},
            nvinfer1::Dims3{channelSlice, prevTensorDims.d[1], prevTensorDims.d[2]},
            nvinfer1::Dims3{1, 1, 1});
        assert(sl != nullptr);
        output = sl;
    }

    return output;
}
nvdsinfer_custom_impl_Yolo/layers/route_layer.h (new file, 18 lines)
@@ -0,0 +1,18 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __ROUTE_LAYER_H__
#define __ROUTE_LAYER_H__

#include <map>
#include <vector>

#include "NvInfer.h"
#include "../utils.h"

nvinfer1::ILayer* routeLayer(
    int layerIdx,
    std::map<std::string, std::string>& block,
    std::vector<nvinfer1::ITensor*> tensorOutputs,
    nvinfer1::INetworkDefinition* network);

#endif
nvdsinfer_custom_impl_Yolo/layers/shortcut_layer.cpp (new file, 45 lines)
@@ -0,0 +1,45 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "shortcut_layer.h"

nvinfer1::ILayer* shortcutLayer(
    int layerIdx,
    std::string activation,
    std::string inputVol,
    std::string shortcutVol,
    nvinfer1::ITensor* input,
    nvinfer1::ITensor* shortcutTensor,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ILayer* output;
    nvinfer1::ITensor* outputTensor;

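    // If the shortcut tensor's volume differs from the input's, slice it
    // down to the input's dimensions so the element-wise sum is well-defined.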
    if (inputVol != shortcutVol)
    {
        nvinfer1::ISliceLayer* sl = network->addSlice(
            *shortcutTensor,
            nvinfer1::Dims3{0, 0, 0},
            input->getDimensions(),
            nvinfer1::Dims3{1, 1, 1});
        assert(sl != nullptr);
        outputTensor = sl->getOutput(0);
        assert(outputTensor != nullptr);
    }
    else
    {
        outputTensor = shortcutTensor;
        assert(outputTensor != nullptr);
    }

    nvinfer1::IElementWiseLayer* ew = network->addElementWise(
        *input, *outputTensor,
        nvinfer1::ElementWiseOperation::kSUM);
    assert(ew != nullptr);

    output = activationLayer(layerIdx, activation, ew, ew->getOutput(0), network);
    assert(output != nullptr);

    return output;
}
nvdsinfer_custom_impl_Yolo/layers/shortcut_layer.h (new file, 22 lines)
@@ -0,0 +1,22 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __SHORTCUT_LAYER_H__
#define __SHORTCUT_LAYER_H__

#include "NvInfer.h"

#include "activation_layer.h"

nvinfer1::ILayer* shortcutLayer(
    int layerIdx,
    std::string activation,
    std::string inputVol,
    std::string shortcutVol,
    nvinfer1::ITensor* input,
    nvinfer1::ITensor* shortcutTensor,
    nvinfer1::INetworkDefinition* network);

#endif
nvdsinfer_custom_impl_Yolo/layers/upsample_layer.cpp (new file, 24 lines)
@@ -0,0 +1,24 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "upsample_layer.h"

nvinfer1::ILayer* upsampleLayer(
    int layerIdx,
    std::map<std::string, std::string>& block,
    nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    assert(block.at("type") == "upsample");
    int stride = std::stoi(block.at("stride"));

    nvinfer1::IResizeLayer* resize_layer = network->addResize(*input);
    assert(resize_layer != nullptr);
    resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
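    // Scale factors are ordered {C, H, W} for these CHW tensors: channels
    // stay as-is while height and width are upscaled by `stride` with
    // nearest-neighbor resampling.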
    float scale[3] = {1.0f, static_cast<float>(stride), static_cast<float>(stride)};
    resize_layer->setScales(scale, 3);
    std::string layer_name = "upsample_" + std::to_string(layerIdx);
    resize_layer->setName(layer_name.c_str());
    return resize_layer;
}
nvdsinfer_custom_impl_Yolo/layers/upsample_layer.h (new file, 21 lines)
@@ -0,0 +1,21 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __UPSAMPLE_LAYER_H__
#define __UPSAMPLE_LAYER_H__

#include <map>
#include <vector>
#include <cassert>

#include "NvInfer.h"

nvinfer1::ILayer* upsampleLayer(
    int layerIdx,
    std::map<std::string, std::string>& block,
    nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif