Add PP-YOLOE support

Marcos Luciano
2022-07-24 18:00:47 -03:00
parent d09879d557
commit a3782ed65e
51 changed files with 1812 additions and 600 deletions

View File

@@ -5,114 +5,113 @@
#include "activation_layer.h"
nvinfer1::ILayer* activationLayer(
nvinfer1::ITensor* activationLayer(
int layerIdx,
std::string activation,
nvinfer1::ILayer* output,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
if (activation == "linear")
{
// Pass
output = input;
}
else if (activation == "relu")
{
nvinfer1::IActivationLayer* relu = network->addActivation(
*input, nvinfer1::ActivationType::kRELU);
nvinfer1::IActivationLayer* relu = network->addActivation(*input, nvinfer1::ActivationType::kRELU);
assert(relu != nullptr);
std::string reluLayerName = "relu_" + std::to_string(layerIdx);
relu->setName(reluLayerName.c_str());
output = relu;
output = relu->getOutput(0);
}
else if (activation == "sigmoid" || activation == "logistic")
{
nvinfer1::IActivationLayer* sigmoid = network->addActivation(
*input, nvinfer1::ActivationType::kSIGMOID);
nvinfer1::IActivationLayer* sigmoid = network->addActivation(*input, nvinfer1::ActivationType::kSIGMOID);
assert(sigmoid != nullptr);
std::string sigmoidLayerName = "sigmoid_" + std::to_string(layerIdx);
sigmoid->setName(sigmoidLayerName.c_str());
output = sigmoid;
output = sigmoid->getOutput(0);
}
else if (activation == "tanh")
{
nvinfer1::IActivationLayer* tanh = network->addActivation(
*input, nvinfer1::ActivationType::kTANH);
nvinfer1::IActivationLayer* tanh = network->addActivation(*input, nvinfer1::ActivationType::kTANH);
assert(tanh != nullptr);
std::string tanhLayerName = "tanh_" + std::to_string(layerIdx);
tanh->setName(tanhLayerName.c_str());
output = tanh;
output = tanh->getOutput(0);
}
else if (activation == "leaky")
{
nvinfer1::IActivationLayer* leaky = network->addActivation(
*input, nvinfer1::ActivationType::kLEAKY_RELU);
nvinfer1::IActivationLayer* leaky = network->addActivation(*input, nvinfer1::ActivationType::kLEAKY_RELU);
assert(leaky != nullptr);
leaky->setAlpha(0.1);
std::string leakyLayerName = "leaky_" + std::to_string(layerIdx);
leaky->setName(leakyLayerName.c_str());
output = leaky;
leaky->setAlpha(0.1);
output = leaky->getOutput(0);
}
else if (activation == "softplus")
{
nvinfer1::IActivationLayer* softplus = network->addActivation(
*input, nvinfer1::ActivationType::kSOFTPLUS);
nvinfer1::IActivationLayer* softplus = network->addActivation(*input, nvinfer1::ActivationType::kSOFTPLUS);
assert(softplus != nullptr);
std::string softplusLayerName = "softplus_" + std::to_string(layerIdx);
softplus->setName(softplusLayerName.c_str());
output = softplus;
output = softplus->getOutput(0);
}
else if (activation == "mish")
{
nvinfer1::IActivationLayer* softplus = network->addActivation(
*input, nvinfer1::ActivationType::kSOFTPLUS);
nvinfer1::IActivationLayer* softplus = network->addActivation(*input, nvinfer1::ActivationType::kSOFTPLUS);
assert(softplus != nullptr);
std::string softplusLayerName = "softplus_" + std::to_string(layerIdx);
softplus->setName(softplusLayerName.c_str());
nvinfer1::IActivationLayer* tanh = network->addActivation(
*softplus->getOutput(0), nvinfer1::ActivationType::kTANH);
nvinfer1::IActivationLayer* tanh = network->addActivation(*softplus->getOutput(0), nvinfer1::ActivationType::kTANH);
assert(tanh != nullptr);
std::string tanhLayerName = "tanh_" + std::to_string(layerIdx);
tanh->setName(tanhLayerName.c_str());
nvinfer1::IElementWiseLayer* mish = network->addElementWise(
*input, *tanh->getOutput(0),
nvinfer1::ElementWiseOperation::kPROD);
nvinfer1::IElementWiseLayer* mish
= network->addElementWise(*input, *tanh->getOutput(0), nvinfer1::ElementWiseOperation::kPROD);
assert(mish != nullptr);
std::string mishLayerName = "mish_" + std::to_string(layerIdx);
mish->setName(mishLayerName.c_str());
output = mish;
output = mish->getOutput(0);
}
else if (activation == "silu" || activation == "swish")
{
nvinfer1::IActivationLayer* sigmoid = network->addActivation(
*input, nvinfer1::ActivationType::kSIGMOID);
nvinfer1::IActivationLayer* sigmoid = network->addActivation(*input, nvinfer1::ActivationType::kSIGMOID);
assert(sigmoid != nullptr);
std::string sigmoidLayerName = "sigmoid_" + std::to_string(layerIdx);
sigmoid->setName(sigmoidLayerName.c_str());
nvinfer1::IElementWiseLayer* silu = network->addElementWise(
*input, *sigmoid->getOutput(0),
nvinfer1::ElementWiseOperation::kPROD);
nvinfer1::IElementWiseLayer* silu
= network->addElementWise(*input, *sigmoid->getOutput(0), nvinfer1::ElementWiseOperation::kPROD);
assert(silu != nullptr);
std::string siluLayerName = "silu_" + std::to_string(layerIdx);
silu->setName(siluLayerName.c_str());
output = silu;
output = silu->getOutput(0);
}
else if (activation == "hardsigmoid")
{
nvinfer1::IActivationLayer* hardsigmoid = network->addActivation(*input, nvinfer1::ActivationType::kHARD_SIGMOID);
assert(hardsigmoid != nullptr);
std::string hardsigmoidLayerName = "hardsigmoid_" + std::to_string(layerIdx);
hardsigmoid->setName(hardsigmoidLayerName.c_str());
hardsigmoid->setAlpha(1.0 / 6.0);
hardsigmoid->setBeta(0.5);
output = hardsigmoid->getOutput(0);
}
else if (activation == "hardswish")
{
nvinfer1::IActivationLayer* hard_sigmoid = network->addActivation(
*input, nvinfer1::ActivationType::kHARD_SIGMOID);
assert(hard_sigmoid != nullptr);
hard_sigmoid->setAlpha(1.0 / 6.0);
hard_sigmoid->setBeta(0.5);
std::string hardSigmoidLayerName = "hard_sigmoid_" + std::to_string(layerIdx);
hard_sigmoid->setName(hardSigmoidLayerName.c_str());
nvinfer1::IElementWiseLayer* hard_swish = network->addElementWise(
*input, *hard_sigmoid->getOutput(0),
nvinfer1::ElementWiseOperation::kPROD);
assert(hard_swish != nullptr);
std::string hardSwishLayerName = "hard_swish_" + std::to_string(layerIdx);
hard_swish->setName(hardSwishLayerName.c_str());
output = hard_swish;
nvinfer1::IActivationLayer* hardsigmoid = network->addActivation(*input, nvinfer1::ActivationType::kHARD_SIGMOID);
assert(hardsigmoid != nullptr);
std::string hardsigmoidLayerName = "hardsigmoid_" + std::to_string(layerIdx);
hardsigmoid->setName(hardsigmoidLayerName.c_str());
hardsigmoid->setAlpha(1.0 / 6.0);
hardsigmoid->setBeta(0.5);
nvinfer1::IElementWiseLayer* hardswish
= network->addElementWise(*input, *hardsigmoid->getOutput(0), nvinfer1::ElementWiseOperation::kPROD);
assert(hardswish != nullptr);
std::string hardswishLayerName = "hardswish_" + std::to_string(layerIdx);
hardswish->setName(hardswishLayerName.c_str());
output = hardswish->getOutput(0);
}
else
{
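Note on the hunk above: across this commit the layer builders return the layer's output ITensor* instead of the ILayer* itself, so callers chain tensors directly. The composite activations are assembled from TensorRT primitives: mish as x * tanh(softplus(x)), silu/swish as x * sigmoid(x), hardswish as x * hardsigmoid(x) with alpha = 1/6 and beta = 0.5, and leaky ReLU with alpha = 0.1. The standalone sketch below is plain C++ that just evaluates the same formulas numerically; the sample inputs are arbitrary.

// Reference math for the composite activations built above from TensorRT
// primitives (softplus, tanh, sigmoid, hard_sigmoid, elementwise product).
#include <cmath>
#include <cstdio>

static float softplus(float x) { return std::log1p(std::exp(x)); }
static float sigmoid(float x) { return 1.0f / (1.0f + std::exp(-x)); }
// kHARD_SIGMOID with alpha = 1/6, beta = 0.5, clamped to [0, 1]
static float hardsigmoid(float x) { return std::fmin(1.0f, std::fmax(0.0f, x / 6.0f + 0.5f)); }

static float mish(float x) { return x * std::tanh(softplus(x)); }   // softplus -> tanh -> prod
static float silu(float x) { return x * sigmoid(x); }                // sigmoid -> prod
static float hardswish(float x) { return x * hardsigmoid(x); }       // hard_sigmoid -> prod
static float leaky(float x) { return x > 0.0f ? x : 0.1f * x; }      // kLEAKY_RELU, alpha = 0.1

int main()
{
    for (float x : {-3.0f, -0.5f, 0.0f, 0.5f, 3.0f})
        std::printf("x=%5.2f mish=%7.4f silu=%7.4f hardswish=%7.4f leaky=%7.4f\n",
            x, mish(x), silu(x), hardswish(x), leaky(x));
    return 0;
}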

View File

@@ -6,18 +6,14 @@
#ifndef __ACTIVATION_LAYER_H__
#define __ACTIVATION_LAYER_H__
#include <string>
#include <cassert>
#include <iostream>
#include "NvInfer.h"
#include "activation_layer.h"
nvinfer1::ILayer* activationLayer(
nvinfer1::ITensor* activationLayer(
int layerIdx,
std::string activation,
nvinfer1::ILayer* output,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);

View File

@@ -6,7 +6,7 @@
#include <math.h>
#include "batchnorm_layer.h"
nvinfer1::ILayer* batchnormLayer(
nvinfer1::ITensor* batchnormLayer(
int layerIdx,
std::map<std::string, std::string>& block,
std::vector<float>& weights,
@@ -17,6 +17,8 @@ nvinfer1::ILayer* batchnormLayer(
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
assert(block.at("type") == "batchnorm");
assert(block.find("filters") != block.end());
@@ -28,7 +30,8 @@ nvinfer1::ILayer* batchnormLayer(
std::vector<float> bnRunningMean;
std::vector<float> bnRunningVar;
if (weightsType == "weights") {
if (weightsType == "weights")
{
for (int i = 0; i < filters; ++i)
{
bnBiases.push_back(weights[weightPtr]);
@@ -50,7 +53,8 @@ nvinfer1::ILayer* batchnormLayer(
weightPtr++;
}
}
else {
else
{
for (int i = 0; i < filters; ++i)
{
bnWeights.push_back(weights[weightPtr]);
@@ -79,35 +83,27 @@ nvinfer1::ILayer* batchnormLayer(
nvinfer1::Weights power{nvinfer1::DataType::kFLOAT, nullptr, size};
float* shiftWt = new float[size];
for (int i = 0; i < size; ++i)
{
shiftWt[i]
= bnBiases.at(i) - ((bnRunningMean.at(i) * bnWeights.at(i)) / bnRunningVar.at(i));
}
shiftWt[i] = bnBiases.at(i) - ((bnRunningMean.at(i) * bnWeights.at(i)) / bnRunningVar.at(i));
shift.values = shiftWt;
float* scaleWt = new float[size];
for (int i = 0; i < size; ++i)
{
scaleWt[i] = bnWeights.at(i) / bnRunningVar[i];
}
scale.values = scaleWt;
float* powerWt = new float[size];
for (int i = 0; i < size; ++i)
{
powerWt[i] = 1.0;
}
power.values = powerWt;
trtWeights.push_back(shift);
trtWeights.push_back(scale);
trtWeights.push_back(power);
nvinfer1::IScaleLayer* bn = network->addScale(
*input, nvinfer1::ScaleMode::kCHANNEL, shift, scale, power);
assert(bn != nullptr);
std::string bnLayerName = "batch_norm_" + std::to_string(layerIdx);
bn->setName(bnLayerName.c_str());
nvinfer1::ILayer* output = bn;
nvinfer1::IScaleLayer* batchnorm = network->addScale(*input, nvinfer1::ScaleMode::kCHANNEL, shift, scale, power);
assert(batchnorm != nullptr);
std::string batchnormLayerName = "batchnorm_" + std::to_string(layerIdx);
batchnorm->setName(batchnormLayerName.c_str());
output = batchnorm->getOutput(0);
output = activationLayer(layerIdx, activation, output, output->getOutput(0), network);
output = activationLayer(layerIdx, activation, output, network);
assert(output != nullptr);
return output;
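The folding above turns batch normalization into a single IScaleLayer in kCHANNEL mode, which computes (x * scale + shift) ^ power per channel: shift = beta - mean * gamma / denom, scale = gamma / denom and power = 1, where denom stands for whatever bnRunningVar holds when this function runs. The small check below uses made-up values to show the folded form equals gamma * (x - mean) / denom + beta.

// Reference for folding batch norm into TensorRT's IScaleLayer (kCHANNEL mode).
#include <cstdio>

int main()
{
    float gamma = 1.2f, beta = -0.3f, mean = 0.8f, denom = 2.0f;  // invented per-channel stats
    float x = 1.5f;                                               // one input value in this channel

    float scale = gamma / denom;
    float shift = beta - mean * gamma / denom;

    float folded = x * scale + shift;                      // what the IScaleLayer computes
    float batchnorm = gamma * (x - mean) / denom + beta;   // textbook form

    std::printf("folded=%.6f batchnorm=%.6f\n", folded, batchnorm);  // identical
    return 0;
}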

View File

@@ -13,7 +13,7 @@
#include "activation_layer.h"
nvinfer1::ILayer* batchnormLayer(
nvinfer1::ITensor* batchnormLayer(
int layerIdx,
std::map<std::string, std::string>& block,
std::vector<float>& weights,

View File

@@ -5,27 +5,32 @@
#include "channels_layer.h"
nvinfer1::ILayer* channelsLayer(
std::string type,
nvinfer1::ITensor* channelsLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::ITensor* implicitTensor,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ILayer* output;
nvinfer1::ITensor* output;
if (type == "shift") {
nvinfer1::IElementWiseLayer* ew = network->addElementWise(
*input, *implicitTensor,
nvinfer1::ElementWiseOperation::kSUM);
assert(ew != nullptr);
output = ew;
assert(block.at("type") == "shift_channels" || block.at("type") == "control_channels");
if (block.at("type") == "shift_channels") {
nvinfer1::IElementWiseLayer* shift
= network->addElementWise(*input, *implicitTensor, nvinfer1::ElementWiseOperation::kSUM);
assert(shift != nullptr);
std::string shiftLayerName = "shift_channels_" + std::to_string(layerIdx);
shift->setName(shiftLayerName.c_str());
output = shift->getOutput(0);
}
else if (type == "control") {
nvinfer1::IElementWiseLayer* ew = network->addElementWise(
*input, *implicitTensor,
nvinfer1::ElementWiseOperation::kPROD);
assert(ew != nullptr);
output = ew;
else if (block.at("type") == "control_channels") {
nvinfer1::IElementWiseLayer* control
= network->addElementWise(*input, *implicitTensor, nvinfer1::ElementWiseOperation::kPROD);
assert(control != nullptr);
std::string controlLayerName = "control_channels_" + std::to_string(layerIdx);
control->setName(controlLayerName.c_str());
output = control->getOutput(0);
}
return output;
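These two ops implement the implicit-knowledge channels used by YOLOR-style configs: shift_channels adds a learned per-channel constant (kSUM) and control_channels multiplies by one (kPROD); the constant comes from implicitLayer with shape {filters, 1, 1} and broadcasts over H x W. The numeric sketch below uses invented sizes and values.

// Reference for shift_channels (add) and control_channels (multiply) with a
// per-channel constant broadcast over the spatial dimensions.
#include <cstdio>
#include <vector>

int main()
{
    const int channels = 2, height = 2, width = 2;
    std::vector<float> x(channels * height * width, 1.0f);  // CHW input, all ones
    std::vector<float> implicit = {0.5f, -0.25f};            // one learned value per channel

    for (int c = 0; c < channels; ++c)
        for (int i = 0; i < height * width; ++i)
        {
            float shifted = x[c * height * width + i] + implicit[c];     // shift_channels
            float controlled = x[c * height * width + i] * implicit[c];  // control_channels
            std::printf("c=%d shift=%.2f control=%.2f\n", c, shifted, controlled);
        }
    return 0;
}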

View File

@@ -11,8 +11,9 @@
#include "NvInfer.h"
nvinfer1::ILayer* channelsLayer(
std::string type,
nvinfer1::ITensor* channelsLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::ITensor* implicitTensor,
nvinfer1::INetworkDefinition* network);

View File

@@ -0,0 +1,29 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include "cls_layer.h"
nvinfer1::ITensor* clsLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
assert(block.at("type") == "cls");
nvinfer1::IShuffleLayer* shuffle = network->addShuffle(*input);
assert(shuffle != nullptr);
std::string shuffleLayerName = "shuffle_" + std::to_string(layerIdx);
shuffle->setName(shuffleLayerName.c_str());
nvinfer1::Permutation permutation;
permutation.order[0] = 1;
permutation.order[1] = 0;
shuffle->setFirstTranspose(permutation);
output = shuffle->getOutput(0);
return output;
}

View File

@@ -3,15 +3,15 @@
* https://www.github.com/marcoslucianops
*/
#ifndef __MAXPOOL_LAYER_H__
#define __MAXPOOL_LAYER_H__
#ifndef __CLS_LAYER_H__
#define __CLS_LAYER_H__
#include <map>
#include <cassert>
#include "NvInfer.h"
nvinfer1::ILayer* maxpoolLayer(
nvinfer1::ITensor* clsLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,

View File

@@ -6,7 +6,7 @@
#include <math.h>
#include "convolutional_layer.h"
nvinfer1::ILayer* convolutionalLayer(
nvinfer1::ITensor* convolutionalLayer(
int layerIdx,
std::map<std::string, std::string>& block,
std::vector<float>& weights,
@@ -18,6 +18,8 @@ nvinfer1::ILayer* convolutionalLayer(
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
assert(block.at("type") == "convolutional");
assert(block.find("filters") != block.end());
assert(block.find("pad") != block.end());
@@ -40,14 +42,10 @@ nvinfer1::ILayer* convolutionalLayer(
int groups = 1;
if (block.find("groups") != block.end())
{
groups = std::stoi(block.at("groups"));
}
if (block.find("bias") != block.end())
{
bias = std::stoi(block.at("bias"));
}
int pad;
if (padding)
@@ -63,7 +61,8 @@ nvinfer1::ILayer* convolutionalLayer(
nvinfer1::Weights convWt{nvinfer1::DataType::kFLOAT, nullptr, size};
nvinfer1::Weights convBias{nvinfer1::DataType::kFLOAT, nullptr, bias};
if (weightsType == "weights") {
if (weightsType == "weights")
{
if (batchNormalize == false)
{
float* val;
@@ -120,7 +119,8 @@ nvinfer1::ILayer* convolutionalLayer(
trtWeights.push_back(convBias);
}
}
else {
else
{
if (batchNormalize == false)
{
float* val = new float[size];
@@ -177,20 +177,18 @@ nvinfer1::ILayer* convolutionalLayer(
}
}
nvinfer1::IConvolutionLayer* conv = network->addConvolutionNd(
*input, filters, nvinfer1::DimsHW{kernelSize, kernelSize}, convWt, convBias);
nvinfer1::IConvolutionLayer* conv
= network->addConvolutionNd(*input, filters, nvinfer1::Dims{2, {kernelSize, kernelSize}}, convWt, convBias);
assert(conv != nullptr);
std::string convLayerName = "conv_" + std::to_string(layerIdx);
conv->setName(convLayerName.c_str());
conv->setStrideNd(nvinfer1::DimsHW{stride, stride});
conv->setPaddingNd(nvinfer1::DimsHW{pad, pad});
conv->setStrideNd(nvinfer1::Dims{2, {stride, stride}});
conv->setPaddingNd(nvinfer1::Dims{2, {pad, pad}});
if (block.find("groups") != block.end())
{
conv->setNbGroups(groups);
}
nvinfer1::ILayer* output = conv;
output = conv->getOutput(0);
if (batchNormalize == true)
{
@@ -200,36 +198,28 @@ nvinfer1::ILayer* convolutionalLayer(
nvinfer1::Weights power{nvinfer1::DataType::kFLOAT, nullptr, size};
float* shiftWt = new float[size];
for (int i = 0; i < size; ++i)
{
shiftWt[i]
= bnBiases.at(i) - ((bnRunningMean.at(i) * bnWeights.at(i)) / bnRunningVar.at(i));
}
shiftWt[i] = bnBiases.at(i) - ((bnRunningMean.at(i) * bnWeights.at(i)) / bnRunningVar.at(i));
shift.values = shiftWt;
float* scaleWt = new float[size];
for (int i = 0; i < size; ++i)
{
scaleWt[i] = bnWeights.at(i) / bnRunningVar[i];
}
scale.values = scaleWt;
float* powerWt = new float[size];
for (int i = 0; i < size; ++i)
{
powerWt[i] = 1.0;
}
power.values = powerWt;
trtWeights.push_back(shift);
trtWeights.push_back(scale);
trtWeights.push_back(power);
nvinfer1::IScaleLayer* bn = network->addScale(
*output->getOutput(0), nvinfer1::ScaleMode::kCHANNEL, shift, scale, power);
assert(bn != nullptr);
std::string bnLayerName = "batch_norm_" + std::to_string(layerIdx);
bn->setName(bnLayerName.c_str());
output = bn;
nvinfer1::IScaleLayer* batchnorm = network->addScale(*output, nvinfer1::ScaleMode::kCHANNEL, shift, scale, power);
assert(batchnorm != nullptr);
std::string batchnormLayerName = "batchnorm_" + std::to_string(layerIdx);
batchnorm->setName(batchnormLayerName.c_str());
output = batchnorm->getOutput(0);
}
output = activationLayer(layerIdx, activation, output, output->getOutput(0), network);
output = activationLayer(layerIdx, activation, output, network);
assert(output != nullptr);
return output;
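Aside from the switch from DimsHW to an explicit Dims{2, {k, k}}, the convolution setup is unchanged, so the usual output-size arithmetic applies per spatial dimension: out = (in + 2 * pad - kernel) / stride + 1. A quick check with arbitrary sizes:

// Output-size arithmetic for a square-kernel convolution with equal stride
// and padding in H and W, as configured above with addConvolutionNd.
#include <cstdio>

static int convOut(int in, int kernel, int stride, int pad)
{
    return (in + 2 * pad - kernel) / stride + 1;  // integer division
}

int main()
{
    std::printf("640, k=3, s=1, p=1 -> %d\n", convOut(640, 3, 1, 1));  // 640
    std::printf("640, k=3, s=2, p=1 -> %d\n", convOut(640, 3, 2, 1));  // 320
    std::printf("20,  k=1, s=1, p=0 -> %d\n", convOut(20, 1, 1, 0));   // 20
    return 0;
}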

View File

@@ -13,7 +13,7 @@
#include "activation_layer.h"
nvinfer1::ILayer* convolutionalLayer(
nvinfer1::ITensor* convolutionalLayer(
int layerIdx,
std::map<std::string, std::string>& block,
std::vector<float>& weights,

View File

@@ -5,17 +5,25 @@
#include "implicit_layer.h"
nvinfer1::ILayer* implicitLayer(
int channels,
nvinfer1::ITensor* implicitLayer(
int layerIdx,
std::map<std::string, std::string>& block,
std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights,
int& weightPtr,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::Weights convWt{nvinfer1::DataType::kFLOAT, nullptr, channels};
nvinfer1::ITensor* output;
float* val = new float[channels];
for (int i = 0; i < channels; ++i)
assert(block.at("type") == "implicit_add" || block.at("type") == "implicit_mul");
assert(block.find("filters") != block.end());
int filters = std::stoi(block.at("filters"));
nvinfer1::Weights convWt{nvinfer1::DataType::kFLOAT, nullptr, filters};
float* val = new float[filters];
for (int i = 0; i < filters; ++i)
{
val[i] = weights[weightPtr];
weightPtr++;
@@ -23,8 +31,11 @@ nvinfer1::ILayer* implicitLayer(
convWt.values = val;
trtWeights.push_back(convWt);
nvinfer1::IConstantLayer* implicit = network->addConstant(nvinfer1::Dims3{static_cast<int>(channels), 1, 1}, convWt);
nvinfer1::IConstantLayer* implicit = network->addConstant(nvinfer1::Dims{3, {filters, 1, 1}}, convWt);
assert(implicit != nullptr);
std::string implicitLayerName = block.at("type") + "_" + std::to_string(layerIdx);
implicit->setName(implicitLayerName.c_str());
output = implicit->getOutput(0);
return implicit;
return output;
}

View File

@@ -12,8 +12,9 @@
#include "NvInfer.h"
nvinfer1::ILayer* implicitLayer(
int channels,
nvinfer1::ITensor* implicitLayer(
int layerIdx,
std::map<std::string, std::string>& block,
std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights,
int& weightPtr,

View File

@@ -1,35 +0,0 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include "maxpool_layer.h"
nvinfer1::ILayer* maxpoolLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
assert(block.at("type") == "maxpool");
assert(block.find("size") != block.end());
assert(block.find("stride") != block.end());
int size = std::stoi(block.at("size"));
int stride = std::stoi(block.at("stride"));
nvinfer1::IPoolingLayer* pool
= network->addPoolingNd(*input, nvinfer1::PoolingType::kMAX, nvinfer1::Dims{2, {size, size}});
assert(pool);
std::string maxpoolLayerName = "maxpool_" + std::to_string(layerIdx);
pool->setStrideNd(nvinfer1::Dims{2, {stride, stride}});
pool->setPaddingNd(nvinfer1::Dims{2, {(size - 1) / 2, (size - 1) / 2}});
if (size == 2 && stride == 1)
{
pool->setPrePadding(nvinfer1::Dims{2, {0, 0}});
pool->setPostPadding(nvinfer1::Dims{2, {1, 1}});
}
pool->setName(maxpoolLayerName.c_str());
return pool;
}

View File

@@ -0,0 +1,57 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include "pooling_layer.h"
nvinfer1::ITensor* poolingLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
assert(block.at("type") == "maxpool" || block.at("type") == "avgpool");
if (block.at("type") == "maxpool")
{
assert(block.find("size") != block.end());
assert(block.find("stride") != block.end());
int size = std::stoi(block.at("size"));
int stride = std::stoi(block.at("stride"));
nvinfer1::IPoolingLayer* maxpool
= network->addPoolingNd(*input, nvinfer1::PoolingType::kMAX, nvinfer1::Dims{2, {size, size}});
assert(maxpool != nullptr);
std::string maxpoolLayerName = "maxpool_" + std::to_string(layerIdx);
maxpool->setName(maxpoolLayerName.c_str());
maxpool->setStrideNd(nvinfer1::Dims{2, {stride, stride}});
maxpool->setPaddingNd(nvinfer1::Dims{2, {(size - 1) / 2, (size - 1) / 2}});
if (size == 2 && stride == 1)
{
maxpool->setPrePadding(nvinfer1::Dims{2, {0, 0}});
maxpool->setPostPadding(nvinfer1::Dims{2, {1, 1}});
}
output = maxpool->getOutput(0);
}
else if (block.at("type") == "avgpool")
{
nvinfer1::Dims inputDims = input->getDimensions();
nvinfer1::IPoolingLayer* avgpool = network->addPoolingNd(
*input, nvinfer1::PoolingType::kAVERAGE, nvinfer1::Dims{2, {inputDims.d[1], inputDims.d[2]}});
assert(avgpool != nullptr);
std::string avgpoolLayerName = "avgpool_" + std::to_string(layerIdx);
avgpool->setName(avgpoolLayerName.c_str());
output = avgpool->getOutput(0);
}
else
{
std::cerr << "Pooling not supported: " << block.at("type") << std::endl;
std::abort();
}
return output;
}
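For maxpool the padding is (size - 1) / 2, and the size == 2, stride == 1 case switches to asymmetric padding (0 before, 1 after) so the output keeps the input's spatial size; avgpool uses the full H x W window, i.e. a global average pool down to (C, 1, 1). The arithmetic below uses invented sizes.

// Output-size check for the pooling configurations built above:
// out = (in + prePad + postPad - size) / stride + 1.
#include <cstdio>

static int poolOut(int in, int size, int stride, int prePad, int postPad)
{
    return (in + prePad + postPad - size) / stride + 1;
}

int main()
{
    std::printf("in=20 k=2 s=2 -> %d\n", poolOut(20, 2, 2, 0, 0));  // symmetric pad 0 -> 10
    std::printf("in=20 k=5 s=1 -> %d\n", poolOut(20, 5, 1, 2, 2));  // symmetric pad 2 -> 20
    std::printf("in=20 k=2 s=1 -> %d\n", poolOut(20, 2, 1, 0, 1));  // pre 0 / post 1  -> 20
    return 0;
}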

View File

@@ -0,0 +1,21 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#ifndef __POOLING_LAYER_H__
#define __POOLING_LAYER_H__
#include <map>
#include <cassert>
#include <iostream>
#include "NvInfer.h"
nvinfer1::ITensor* poolingLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);
#endif

View File

@@ -0,0 +1,58 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include "reduce_layer.h"
nvinfer1::ITensor* reduceLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
assert(block.at("type") == "reduce");
assert(block.find("mode") != block.end());
assert(block.find("axes") != block.end());
std::string mode = block.at("mode");
nvinfer1::ReduceOperation operation;
if (mode == "mean")
operation = nvinfer1::ReduceOperation::kAVG;
std::string strAxes = block.at("axes");
std::vector<int32_t> axes;
size_t lastPos = 0, pos = 0;
while ((pos = strAxes.find(',', lastPos)) != std::string::npos)
{
int vL = std::stoi(trim(strAxes.substr(lastPos, pos - lastPos)));
axes.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strAxes.length())
{
std::string lastV = trim(strAxes.substr(lastPos));
if (!lastV.empty())
axes.push_back(std::stoi(lastV));
}
assert(!axes.empty());
uint32_t axisMask = 0;
for (int axis : axes)
axisMask |= 1 << axis;
bool keepDims = false;
if (block.find("keep") != block.end())
keepDims = std::stoi(block.at("keep")) == 1 ? true : false;
nvinfer1::IReduceLayer* reduce = network->addReduce(*input, operation, axisMask, keepDims);
assert(reduce != nullptr);
std::string reduceLayerName = "reduce_" + std::to_string(layerIdx);
reduce->setName(reduceLayerName.c_str());
output = reduce->getOutput(0);
return output;
}
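The axes string is turned into the bit mask IReduceLayer expects, one bit per listed axis. Below is a standalone parse of the same comma-separated form, using std::getline instead of the trim helper from utils; the input string is only an example.

// Parse "axes" into the reduce axis bit mask: each axis i sets bit (1 << i).
#include <cstdint>
#include <cstdio>
#include <sstream>
#include <string>
#include <vector>

int main()
{
    std::string strAxes = "1, 2";  // e.g. reduce over H and W of a CHW tensor
    std::vector<int32_t> axes;
    std::stringstream ss(strAxes);
    std::string item;
    while (std::getline(ss, item, ','))
        axes.push_back(std::stoi(item));  // stoi skips the leading space

    uint32_t axisMask = 0;
    for (int axis : axes)
        axisMask |= 1u << axis;

    std::printf("axisMask = 0x%X\n", axisMask);  // 0x6 for axes 1 and 2
    return 0;
}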

View File

@@ -0,0 +1,18 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#ifndef __REDUCE_LAYER_H__
#define __REDUCE_LAYER_H__
#include "NvInfer.h"
#include "../utils.h"
nvinfer1::ITensor* reduceLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);
#endif

View File

@@ -0,0 +1,113 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include "reg_layer.h"
nvinfer1::ITensor* regLayer(
int layerIdx,
std::map<std::string, std::string>& block,
std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights,
int& weightPtr,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
assert(block.at("type") == "reg");
nvinfer1::IShuffleLayer* shuffle = network->addShuffle(*input);
assert(shuffle != nullptr);
std::string shuffleLayerName = "shuffle_" + std::to_string(layerIdx);
shuffle->setName(shuffleLayerName.c_str());
nvinfer1::Permutation permutation;
permutation.order[0] = 1;
permutation.order[1] = 0;
shuffle->setFirstTranspose(permutation);
output = shuffle->getOutput(0);
nvinfer1::Dims shuffleDims = output->getDimensions();
nvinfer1::ISliceLayer* sliceLt = network->addSlice(
*output, nvinfer1::Dims{2, {0, 0}}, nvinfer1::Dims{2, {shuffleDims.d[0], 2}}, nvinfer1::Dims{2, {1, 1}});
assert(sliceLt != nullptr);
std::string sliceLtLayerName = "slice_lt_" + std::to_string(layerIdx);
sliceLt->setName(sliceLtLayerName.c_str());
nvinfer1::ITensor* lt = sliceLt->getOutput(0);
nvinfer1::ISliceLayer* sliceRb = network->addSlice(
*output, nvinfer1::Dims{2, {0, 2}}, nvinfer1::Dims{2, {shuffleDims.d[0], 2}}, nvinfer1::Dims{2, {1, 1}});
assert(sliceRb != nullptr);
std::string sliceRbLayerName = "slice_rb_" + std::to_string(layerIdx);
sliceRb->setName(sliceRbLayerName.c_str());
nvinfer1::ITensor* rb = sliceRb->getOutput(0);
int channels = shuffleDims.d[0] * 2;
nvinfer1::Weights anchorPointsWt{nvinfer1::DataType::kFLOAT, nullptr, channels};
float* val = new float[channels];
for (int i = 0; i < channels; ++i)
{
val[i] = weights[weightPtr];
weightPtr++;
}
anchorPointsWt.values = val;
trtWeights.push_back(anchorPointsWt);
nvinfer1::IConstantLayer* anchorPoints = network->addConstant(nvinfer1::Dims{2, {shuffleDims.d[0], 2}}, anchorPointsWt);
assert(anchorPoints != nullptr);
std::string anchorPointsLayerName = "anchor_points_" + std::to_string(layerIdx);
anchorPoints->setName(anchorPointsLayerName.c_str());
nvinfer1::ITensor* anchorPointsTensor = anchorPoints->getOutput(0);
nvinfer1::IElementWiseLayer* x1y1
= network->addElementWise(*anchorPointsTensor, *lt, nvinfer1::ElementWiseOperation::kSUB);
assert(x1y1 != nullptr);
std::string x1y1LayerName = "x1y1_" + std::to_string(layerIdx);
x1y1->setName(x1y1LayerName.c_str());
nvinfer1::ITensor* x1y1Tensor = x1y1->getOutput(0);
nvinfer1::IElementWiseLayer* x2y2
= network->addElementWise(*rb, *anchorPointsTensor, nvinfer1::ElementWiseOperation::kSUM);
assert(x2y2 != nullptr);
std::string x2y2LayerName = "x2y2_" + std::to_string(layerIdx);
x2y2->setName(x2y2LayerName.c_str());
nvinfer1::ITensor* x2y2Tensor = x2y2->getOutput(0);
std::vector<nvinfer1::ITensor*> concatInputs;
concatInputs.push_back(x1y1Tensor);
concatInputs.push_back(x2y2Tensor);
nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
assert(concat != nullptr);
std::string concatLayerName = "concat_" + std::to_string(layerIdx);
concat->setName(concatLayerName.c_str());
concat->setAxis(1);
output = concat->getOutput(0);
channels = shuffleDims.d[0];
nvinfer1::Weights stridePointsWt{nvinfer1::DataType::kFLOAT, nullptr, channels};
val = new float[channels];
for (int i = 0; i < channels; ++i)
{
val[i] = weights[weightPtr];
weightPtr++;
}
stridePointsWt.values = val;
trtWeights.push_back(stridePointsWt);
nvinfer1::IConstantLayer* stridePoints = network->addConstant(nvinfer1::Dims{2, {shuffleDims.d[0], 1}}, stridePointsWt);
assert(stridePoints != nullptr);
std::string stridePointsLayerName = "stride_points_" + std::to_string(layerIdx);
stridePoints->setName(stridePointsLayerName.c_str());
nvinfer1::ITensor* stridePointsTensor = stridePoints->getOutput(0);
nvinfer1::IElementWiseLayer* pred
= network->addElementWise(*output, *stridePointsTensor, nvinfer1::ElementWiseOperation::kPROD);
assert(pred != nullptr);
std::string predLayerName = "pred_" + std::to_string(layerIdx);
pred->setName(predLayerName.c_str());
output = pred->getOutput(0);
return output;
}
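This is the PP-YOLOE box decoding: the anchor-point coordinates and per-point strides are read from the converted weight stream as constants, the first two values per point are sliced as lt and the last two as rb (interpreted here as left/top and right/bottom distances), and the layer computes x1y1 = anchor - lt, x2y2 = rb + anchor, then scales everything by the stride. A numeric sketch for a single point with invented values:

// Reference for the box decoding assembled above from slice, constant,
// elementwise and concat layers.
#include <cstdio>

int main()
{
    float ax = 10.5f, ay = 6.5f;                    // anchor point (grid units)
    float l = 1.2f, t = 0.8f, r = 2.0f, b = 1.5f;   // predicted distances
    float stride = 8.0f;                            // stride of this feature map

    float x1 = (ax - l) * stride;   // x1y1 = anchor - lt
    float y1 = (ay - t) * stride;
    float x2 = (r + ax) * stride;   // x2y2 = rb + anchor
    float y2 = (b + ay) * stride;

    std::printf("box: (%.1f, %.1f) -> (%.1f, %.1f)\n", x1, y1, x2, y2);
    return 0;
}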

View File

@@ -0,0 +1,24 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#ifndef __REG_LAYER_H__
#define __REG_LAYER_H__
#include <map>
#include <vector>
#include <cassert>
#include "NvInfer.h"
nvinfer1::ITensor* regLayer(
int layerIdx,
std::map<std::string, std::string>& block,
std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights,
int& weightPtr,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);
#endif

View File

@@ -0,0 +1,62 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include "reorg_layer.h"
nvinfer1::ITensor* reorgLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
assert(block.at("type") == "reorg");
nvinfer1::Dims inputDims = input->getDimensions();
nvinfer1::ISliceLayer *slice1 = network->addSlice(
*input, nvinfer1::Dims{3, {0, 0, 0}}, nvinfer1::Dims{3, {inputDims.d[0], inputDims.d[1] / 2, inputDims.d[2] / 2}},
nvinfer1::Dims{3, {1, 2, 2}});
assert(slice1 != nullptr);
std::string slice1LayerName = "slice1_" + std::to_string(layerIdx);
slice1->setName(slice1LayerName.c_str());
nvinfer1::ISliceLayer *slice2 = network->addSlice(
*input, nvinfer1::Dims{3, {0, 0, 1}}, nvinfer1::Dims{3, {inputDims.d[0], inputDims.d[1] / 2, inputDims.d[2] / 2}},
nvinfer1::Dims{3, {1, 2, 2}});
assert(slice2 != nullptr);
std::string slice2LayerName = "slice2_" + std::to_string(layerIdx);
slice2->setName(slice2LayerName.c_str());
nvinfer1::ISliceLayer *slice3 = network->addSlice(
*input, nvinfer1::Dims{3, {0, 1, 0}}, nvinfer1::Dims{3, {inputDims.d[0], inputDims.d[1] / 2, inputDims.d[2] / 2}},
nvinfer1::Dims{3, {1, 2, 2}});
assert(slice3 != nullptr);
std::string slice3LayerName = "slice3_" + std::to_string(layerIdx);
slice3->setName(slice3LayerName.c_str());
nvinfer1::ISliceLayer *slice4 = network->addSlice(
*input, nvinfer1::Dims{3, {0, 1, 1}}, nvinfer1::Dims{3, {inputDims.d[0], inputDims.d[1] / 2, inputDims.d[2] / 2}},
nvinfer1::Dims{3, {1, 2, 2}});
assert(slice4 != nullptr);
std::string slice4LayerName = "slice4_" + std::to_string(layerIdx);
slice4->setName(slice4LayerName.c_str());
std::vector<nvinfer1::ITensor*> concatInputs;
concatInputs.push_back(slice1->getOutput(0));
concatInputs.push_back(slice2->getOutput(0));
concatInputs.push_back(slice3->getOutput(0));
concatInputs.push_back(slice4->getOutput(0));
nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
assert(concat != nullptr);
std::string concatLayerName = "concat_" + std::to_string(layerIdx);
concat->setName(concatLayerName.c_str());
concat->setAxis(0);
output = concat->getOutput(0);
return output;
}
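The reorg is built as four strided slices at spatial offsets (0, 0), (0, 1), (1, 0) and (1, 1) with stride 2, concatenated on the channel axis, so a (C, H, W) tensor becomes (4C, H/2, W/2). The trace below shows which elements each slice picks up on an invented 1 x 4 x 4 input.

// Reference trace of the four strided slices that make up the reorg above.
#include <cstdio>

int main()
{
    const int C = 1, H = 4, W = 4;
    float in[C * H * W];
    for (int i = 0; i < C * H * W; ++i)
        in[i] = static_cast<float>(i);  // 0..15, row-major H x W

    // (row, col) offsets, matching the slice start dims {0, r, c}
    const int offs[4][2] = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
    for (int s = 0; s < 4; ++s)
    {
        std::printf("slice %d:", s + 1);
        for (int y = offs[s][0]; y < H; y += 2)
            for (int x = offs[s][1]; x < W; x += 2)
                std::printf(" %2.0f", in[y * W + x]);
        std::printf("\n");
    }
    return 0;
}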

View File

@@ -12,8 +12,9 @@
#include "NvInfer.h"
nvinfer1::ILayer* reorgV5Layer(
nvinfer1::ITensor* reorgLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);

View File

@@ -1,62 +0,0 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include "reorgv5_layer.h"
nvinfer1::ILayer* reorgV5Layer(
int layerIdx,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::Dims prevTensorDims = input->getDimensions();
nvinfer1::ISliceLayer *slice1 = network->addSlice(
*input,
nvinfer1::Dims3{0, 0, 0},
nvinfer1::Dims3{prevTensorDims.d[0], prevTensorDims.d[1] / 2, prevTensorDims.d[2] / 2},
nvinfer1::Dims3{1, 2, 2});
assert(slice1 != nullptr);
std::string slice1LayerName = "slice1_" + std::to_string(layerIdx);
slice1->setName(slice1LayerName.c_str());
nvinfer1::ISliceLayer *slice2 = network->addSlice(
*input,
nvinfer1::Dims3{0, 1, 0},
nvinfer1::Dims3{prevTensorDims.d[0], prevTensorDims.d[1] / 2, prevTensorDims.d[2] / 2},
nvinfer1::Dims3{1, 2, 2});
assert(slice2 != nullptr);
std::string slice2LayerName = "slice2_" + std::to_string(layerIdx);
slice2->setName(slice2LayerName.c_str());
nvinfer1::ISliceLayer *slice3 = network->addSlice(
*input,
nvinfer1::Dims3{0, 0, 1},
nvinfer1::Dims3{prevTensorDims.d[0], prevTensorDims.d[1] / 2, prevTensorDims.d[2] / 2},
nvinfer1::Dims3{1, 2, 2});
assert(slice3 != nullptr);
std::string slice3LayerName = "slice3_" + std::to_string(layerIdx);
slice3->setName(slice3LayerName.c_str());
nvinfer1::ISliceLayer *slice4 = network->addSlice(
*input,
nvinfer1::Dims3{0, 1, 1},
nvinfer1::Dims3{prevTensorDims.d[0], prevTensorDims.d[1] / 2, prevTensorDims.d[2] / 2},
nvinfer1::Dims3{1, 2, 2});
assert(slice4 != nullptr);
std::string slice4LayerName = "slice4_" + std::to_string(layerIdx);
slice4->setName(slice4LayerName.c_str());
std::vector<nvinfer1::ITensor*> concatInputs;
concatInputs.push_back (slice1->getOutput(0));
concatInputs.push_back (slice2->getOutput(0));
concatInputs.push_back (slice3->getOutput(0));
concatInputs.push_back (slice4->getOutput(0));
nvinfer1::IConcatenationLayer* concat =
network->addConcatenation(concatInputs.data(), concatInputs.size());
assert(concat != nullptr);
return concat;
}

View File

@@ -5,58 +5,73 @@
#include "route_layer.h"
nvinfer1::ILayer* routeLayer(
nvinfer1::ITensor* routeLayer(
int layerIdx,
std::string& layers,
std::map<std::string, std::string>& block,
std::vector<nvinfer1::ITensor*> tensorOutputs,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
assert(block.at("type") == "route");
assert(block.find("layers") != block.end());
std::string strLayers = block.at("layers");
std::vector<int> idxLayers;
size_t lastPos = 0, pos = 0;
while ((pos = strLayers.find(',', lastPos)) != std::string::npos) {
while ((pos = strLayers.find(',', lastPos)) != std::string::npos)
{
int vL = std::stoi(trim(strLayers.substr(lastPos, pos - lastPos)));
idxLayers.push_back (vL);
idxLayers.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strLayers.length()) {
if (lastPos < strLayers.length())
{
std::string lastV = trim(strLayers.substr(lastPos));
if (!lastV.empty()) {
idxLayers.push_back (std::stoi(lastV));
}
if (!lastV.empty())
idxLayers.push_back(std::stoi(lastV));
}
assert (!idxLayers.empty());
std::vector<nvinfer1::ITensor*> concatInputs;
for (int idxLayer : idxLayers) {
if (idxLayer < 0) {
idxLayer = tensorOutputs.size() + idxLayer;
}
assert (idxLayer >= 0 && idxLayer < (int)tensorOutputs.size());
concatInputs.push_back (tensorOutputs[idxLayer]);
for (uint i = 0; i < idxLayers.size(); ++i)
{
if (idxLayers[i] < 0)
idxLayers[i] = tensorOutputs.size() + idxLayers[i];
assert (idxLayers[i] >= 0 && idxLayers[i] < (int)tensorOutputs.size());
concatInputs.push_back(tensorOutputs[idxLayers[i]]);
if (i < idxLayers.size() - 1)
layers += std::to_string(idxLayers[i]) + ", ";
}
layers += std::to_string(idxLayers[idxLayers.size() - 1]);
nvinfer1::IConcatenationLayer* concat =
network->addConcatenation(concatInputs.data(), concatInputs.size());
int axis = 0;
if (block.find("axis") != block.end())
axis = std::stoi(block.at("axis"));
if (axis < 0)
axis = concatInputs[0]->getDimensions().nbDims + axis;
nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
assert(concat != nullptr);
std::string concatLayerName = "route_" + std::to_string(layerIdx - 1);
std::string concatLayerName = "route_" + std::to_string(layerIdx);
concat->setName(concatLayerName.c_str());
concat->setAxis(0);
concat->setAxis(axis);
output = concat->getOutput(0);
nvinfer1::ILayer* output = concat;
if (block.find("groups") != block.end()) {
nvinfer1::Dims prevTensorDims = output->getOutput(0)->getDimensions();
if (block.find("groups") != block.end())
{
nvinfer1::Dims prevTensorDims = output->getDimensions();
int groups = stoi(block.at("groups"));
int group_id = stoi(block.at("group_id"));
int startSlice = (prevTensorDims.d[0] / groups) * group_id;
int channelSlice = (prevTensorDims.d[0] / groups);
nvinfer1::ISliceLayer* sl = network->addSlice(
*output->getOutput(0),
nvinfer1::Dims3{startSlice, 0, 0},
nvinfer1::Dims3{channelSlice, prevTensorDims.d[1], prevTensorDims.d[2]},
nvinfer1::Dims3{1, 1, 1});
assert(sl != nullptr);
output = sl;
nvinfer1::ISliceLayer* slice = network->addSlice(
*output, nvinfer1::Dims{3, {startSlice, 0, 0}},
nvinfer1::Dims{3, {channelSlice, prevTensorDims.d[1], prevTensorDims.d[2]}}, nvinfer1::Dims{3, {1, 1, 1}});
assert(slice != nullptr);
std::string sliceLayerName = "slice_" + std::to_string(layerIdx);
slice->setName(sliceLayerName.c_str());
output = slice->getOutput(0);
}
return output;
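Negative entries in layers count back from the current position, so they are resolved with tensorOutputs.size() + idx before the concatenation, and a negative axis is normalized against the tensor rank the same way. A standalone check with invented indices:

// Reference for the negative index and axis handling in the route layer above.
#include <cstdio>
#include <vector>

int main()
{
    std::vector<int> tensorOutputs(25);  // pretend 25 layer outputs exist so far
    int idxLayers[] = {-1, -4, 10};
    for (int idx : idxLayers)
    {
        int absolute = idx < 0 ? static_cast<int>(tensorOutputs.size()) + idx : idx;
        std::printf("layer %3d -> %d\n", idx, absolute);  // -1 -> 24, -4 -> 21, 10 -> 10
    }

    int nbDims = 3, axis = -1;           // e.g. concatenate on the last axis
    if (axis < 0)
        axis = nbDims + axis;
    std::printf("axis -> %d\n", axis);   // 2
    return 0;
}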

View File

@@ -9,8 +9,9 @@
#include "NvInfer.h"
#include "../utils.h"
nvinfer1::ILayer* routeLayer(
nvinfer1::ITensor* routeLayer(
int layerIdx,
std::string& layers,
std::map<std::string, std::string>& block,
std::vector<nvinfer1::ITensor*> tensorOutputs,
nvinfer1::INetworkDefinition* network);

View File

@@ -5,40 +5,47 @@
#include "shortcut_layer.h"
nvinfer1::ILayer* shortcutLayer(
nvinfer1::ITensor* shortcutLayer(
int layerIdx,
std::string mode,
std::string activation,
std::string inputVol,
std::string shortcutVol,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::ITensor* shortcutTensor,
nvinfer1::ITensor* shortcutInput,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ILayer* output;
nvinfer1::ITensor* outputTensor;
nvinfer1::ITensor* output;
if (inputVol != shortcutVol)
assert(block.at("type") == "shortcut");
nvinfer1::ElementWiseOperation operation = nvinfer1::ElementWiseOperation::kSUM;
if (mode == "mul")
operation = nvinfer1::ElementWiseOperation::kPROD;
if (mode == "add" && inputVol != shortcutVol)
{
nvinfer1::ISliceLayer* sl = network->addSlice(
*shortcutTensor,
nvinfer1::Dims3{0, 0, 0},
input->getDimensions(),
nvinfer1::Dims3{1, 1, 1});
assert(sl != nullptr);
outputTensor = sl->getOutput(0);
assert(outputTensor != nullptr);
} else
nvinfer1::ISliceLayer* slice = network->addSlice(
*shortcutInput, nvinfer1::Dims{3, {0, 0, 0}}, input->getDimensions(), nvinfer1::Dims{3, {1, 1, 1}});
assert(slice != nullptr);
std::string sliceLayerName = "slice_" + std::to_string(layerIdx);
slice->setName(sliceLayerName.c_str());
output = slice->getOutput(0);
}
else
{
outputTensor = shortcutTensor;
assert(outputTensor != nullptr);
output = shortcutInput;
}
nvinfer1::IElementWiseLayer* ew = network->addElementWise(
*input, *outputTensor,
nvinfer1::ElementWiseOperation::kSUM);
assert(ew != nullptr);
nvinfer1::IElementWiseLayer* shortcut = network->addElementWise(*input, *output, operation);
assert(shortcut != nullptr);
std::string shortcutLayerName = "shortcut_" + std::to_string(layerIdx);
shortcut->setName(shortcutLayerName.c_str());
output = shortcut->getOutput(0);
output = activationLayer(layerIdx, activation, ew, ew->getOutput(0), network);
output = activationLayer(layerIdx, activation, output, network);
assert(output != nullptr);
return output;

View File

@@ -6,17 +6,21 @@
#ifndef __SHORTCUT_LAYER_H__
#define __SHORTCUT_LAYER_H__
#include <map>
#include "NvInfer.h"
#include "activation_layer.h"
nvinfer1::ILayer* shortcutLayer(
nvinfer1::ITensor* shortcutLayer(
int layerIdx,
std::string mode,
std::string activation,
std::string inputVol,
std::string shortcutVol,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::ITensor* shortcutTensor,
nvinfer1::ITensor* shortcut,
nvinfer1::INetworkDefinition* network);
#endif

View File

@@ -0,0 +1,123 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include "shuffle_layer.h"
nvinfer1::ITensor* shuffleLayer(
int layerIdx,
std::string& layer,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
std::vector<nvinfer1::ITensor*> tensorOutputs,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
assert(block.at("type") == "shuffle");
nvinfer1::IShuffleLayer* shuffle = network->addShuffle(*input);
assert(shuffle != nullptr);
std::string shuffleLayerName = "shuffle_" + std::to_string(layerIdx);
shuffle->setName(shuffleLayerName.c_str());
if (block.find("reshape") != block.end())
{
std::string strReshape = block.at("reshape");
std::vector<int32_t> reshape;
size_t lastPos = 0, pos = 0;
while ((pos = strReshape.find(',', lastPos)) != std::string::npos)
{
int vL = std::stoi(trim(strReshape.substr(lastPos, pos - lastPos)));
reshape.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strReshape.length())
{
std::string lastV = trim(strReshape.substr(lastPos));
if (!lastV.empty())
reshape.push_back(std::stoi(lastV));
}
assert(!reshape.empty());
int from = -1;
if (block.find("from") != block.end())
from = std::stoi(block.at("from"));
if (from < 0)
from = tensorOutputs.size() + from;
layer = std::to_string(from);
nvinfer1::Dims inputTensorDims = tensorOutputs[from]->getDimensions();
int32_t l = inputTensorDims.d[1] * inputTensorDims.d[2];
nvinfer1::Dims reshapeDims;
reshapeDims.nbDims = reshape.size();
for (uint i = 0; i < reshape.size(); ++i)
if (reshape[i] == 0)
reshapeDims.d[i] = l;
else
reshapeDims.d[i] = reshape[i];
shuffle->setReshapeDimensions(reshapeDims);
}
if (block.find("transpose1") != block.end())
{
std::string strTranspose1 = block.at("transpose1");
std::vector<int32_t> transpose1;
size_t lastPos = 0, pos = 0;
while ((pos = strTranspose1.find(',', lastPos)) != std::string::npos)
{
int vL = std::stoi(trim(strTranspose1.substr(lastPos, pos - lastPos)));
transpose1.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strTranspose1.length())
{
std::string lastV = trim(strTranspose1.substr(lastPos));
if (!lastV.empty())
transpose1.push_back(std::stoi(lastV));
}
assert(!transpose1.empty());
nvinfer1::Permutation permutation1;
for (uint i = 0; i < transpose1.size(); ++i)
permutation1.order[i] = transpose1[i];
shuffle->setFirstTranspose(permutation1);
}
if (block.find("transpose2") != block.end())
{
std::string strTranspose2 = block.at("transpose2");
std::vector<int32_t> transpose2;
size_t lastPos = 0, pos = 0;
while ((pos = strTranspose2.find(',', lastPos)) != std::string::npos)
{
int vL = std::stoi(trim(strTranspose2.substr(lastPos, pos - lastPos)));
transpose2.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strTranspose2.length())
{
std::string lastV = trim(strTranspose2.substr(lastPos));
if (!lastV.empty())
transpose2.push_back(std::stoi(lastV));
}
assert(!transpose2.empty());
nvinfer1::Permutation permutation2;
for (uint i = 0; i < transpose2.size(); ++i)
permutation2.order[i] = transpose2[i];
shuffle->setSecondTranspose(permutation2);
}
output = shuffle->getOutput(0);
return output;
}
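In the reshape spec above, a 0 entry is replaced by d[1] * d[2] of the referenced layer's output (its flattened spatial size); other entries are taken as-is. A tiny sketch of that substitution with invented dimensions:

// Reference for resolving a "reshape" spec that uses 0 as a placeholder.
#include <cstdio>

int main()
{
    int inputDims[3] = {68, 20, 20};     // (C, H, W) of the referenced layer
    int reshape[2] = {0, 4};             // e.g. "reshape=0, 4" in the cfg block

    int l = inputDims[1] * inputDims[2]; // 400
    int out[2];
    for (int i = 0; i < 2; ++i)
        out[i] = reshape[i] == 0 ? l : reshape[i];

    std::printf("reshape -> {%d, %d}\n", out[0], out[1]);  // {400, 4}
    return 0;
}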

View File

@@ -0,0 +1,20 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#ifndef __SHUFFLE_LAYER_H__
#define __SHUFFLE_LAYER_H__
#include "NvInfer.h"
#include "../utils.h"
nvinfer1::ITensor* shuffleLayer(
int layerIdx,
std::string& layer,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
std::vector<nvinfer1::ITensor*> tensorOutputs,
nvinfer1::INetworkDefinition* network);
#endif

View File

@@ -0,0 +1,29 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#include "softmax_layer.h"
nvinfer1::ITensor* softmaxLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
assert(block.at("type") == "softmax");
assert(block.find("axes") != block.end());
int axes = std::stoi(block.at("axes"));
nvinfer1::ISoftMaxLayer* softmax = network->addSoftMax(*input);
assert(softmax != nullptr);
std::string softmaxLayerName = "softmax_" + std::to_string(layerIdx);
softmax->setName(softmaxLayerName.c_str());
softmax->setAxes(1 << axes);
output = softmax->getOutput(0);
return output;
}

View File

@@ -0,0 +1,20 @@
/*
* Created by Marcos Luciano
* https://www.github.com/marcoslucianops
*/
#ifndef __SOFTMAX_LAYER_H__
#define __SOFTMAX_LAYER_H__
#include <map>
#include <cassert>
#include "NvInfer.h"
nvinfer1::ITensor* softmaxLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);
#endif

View File

@@ -5,20 +5,28 @@
#include "upsample_layer.h"
nvinfer1::ILayer* upsampleLayer(
nvinfer1::ITensor* upsampleLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
assert(block.at("type") == "upsample");
assert(block.find("stride") != block.end());
int stride = std::stoi(block.at("stride"));
nvinfer1::IResizeLayer* resize_layer = network->addResize(*input);
resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
float scale[3] = {1, static_cast<float>(stride), static_cast<float>(stride)};
resize_layer->setScales(scale, 3);
std::string layer_name = "upsample_" + std::to_string(layerIdx);
resize_layer->setName(layer_name.c_str());
return resize_layer;
nvinfer1::IResizeLayer* resize = network->addResize(*input);
assert(resize != nullptr);
std::string resizeLayerName = "upsample_" + std::to_string(layerIdx);
resize->setName(resizeLayerName.c_str());
resize->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
resize->setScales(scale, 3);
output = resize->getOutput(0);
return output;
}
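The upsample now returns the IResizeLayer output tensor: a nearest-neighbor resize with scale factors {1, stride, stride}, so channels are untouched and both spatial dimensions grow by the cfg stride. With arbitrary numbers:

// Reference for the effect of the scale factors passed to setScales above.
#include <cstdio>

int main()
{
    int dims[3] = {256, 20, 20};  // (C, H, W)
    int stride = 2;
    float scale[3] = {1.0f, static_cast<float>(stride), static_cast<float>(stride)};

    std::printf("out: (%d, %d, %d)\n",
        static_cast<int>(dims[0] * scale[0]),
        static_cast<int>(dims[1] * scale[1]),
        static_cast<int>(dims[2] * scale[2]));  // (256, 40, 40)
    return 0;
}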

View File

@@ -7,12 +7,11 @@
#define __UPSAMPLE_LAYER_H__
#include <map>
#include <vector>
#include <cassert>
#include "NvInfer.h"
nvinfer1::ILayer* upsampleLayer(
nvinfer1::ITensor* upsampleLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,