Add YOLOv8 support
nvdsinfer_custom_impl_Yolo/layers/activation_layer.cpp
@@ -5,118 +5,107 @@

#include "activation_layer.h"

#include <cassert>
#include <iostream>

nvinfer1::ITensor*
activationLayer(int layerIdx, std::string activation, nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network,
    std::string layerName)
{
    nvinfer1::ITensor* output;

    if (activation == "linear")
        output = input;
    else if (activation == "relu") {
        nvinfer1::IActivationLayer* relu = network->addActivation(*input, nvinfer1::ActivationType::kRELU);
        assert(relu != nullptr);
        std::string reluLayerName = "relu_" + layerName + std::to_string(layerIdx);
        relu->setName(reluLayerName.c_str());
        output = relu->getOutput(0);
    }
    else if (activation == "sigmoid" || activation == "logistic") {
        nvinfer1::IActivationLayer* sigmoid = network->addActivation(*input, nvinfer1::ActivationType::kSIGMOID);
        assert(sigmoid != nullptr);
        std::string sigmoidLayerName = "sigmoid_" + layerName + std::to_string(layerIdx);
        sigmoid->setName(sigmoidLayerName.c_str());
        output = sigmoid->getOutput(0);
    }
    else if (activation == "tanh") {
        nvinfer1::IActivationLayer* tanh = network->addActivation(*input, nvinfer1::ActivationType::kTANH);
        assert(tanh != nullptr);
        std::string tanhLayerName = "tanh_" + layerName + std::to_string(layerIdx);
        tanh->setName(tanhLayerName.c_str());
        output = tanh->getOutput(0);
    }
    else if (activation == "leaky") {
        nvinfer1::IActivationLayer* leaky = network->addActivation(*input, nvinfer1::ActivationType::kLEAKY_RELU);
        assert(leaky != nullptr);
        std::string leakyLayerName = "leaky_" + layerName + std::to_string(layerIdx);
        leaky->setName(leakyLayerName.c_str());
        leaky->setAlpha(0.1);
        output = leaky->getOutput(0);
    }
    else if (activation == "softplus") {
        nvinfer1::IActivationLayer* softplus = network->addActivation(*input, nvinfer1::ActivationType::kSOFTPLUS);
        assert(softplus != nullptr);
        std::string softplusLayerName = "softplus_" + layerName + std::to_string(layerIdx);
        softplus->setName(softplusLayerName.c_str());
        output = softplus->getOutput(0);
    }
    else if (activation == "mish") {
        nvinfer1::IActivationLayer* softplus = network->addActivation(*input, nvinfer1::ActivationType::kSOFTPLUS);
        assert(softplus != nullptr);
        std::string softplusLayerName = "softplus_" + layerName + std::to_string(layerIdx);
        softplus->setName(softplusLayerName.c_str());
        nvinfer1::IActivationLayer* tanh = network->addActivation(*softplus->getOutput(0), nvinfer1::ActivationType::kTANH);
        assert(tanh != nullptr);
        std::string tanhLayerName = "tanh_" + layerName + std::to_string(layerIdx);
        tanh->setName(tanhLayerName.c_str());
        nvinfer1::IElementWiseLayer* mish = network->addElementWise(*input, *tanh->getOutput(0),
            nvinfer1::ElementWiseOperation::kPROD);
        assert(mish != nullptr);
        std::string mishLayerName = "mish_" + layerName + std::to_string(layerIdx);
        mish->setName(mishLayerName.c_str());
        output = mish->getOutput(0);
    }
    else if (activation == "silu" || activation == "swish") {
        nvinfer1::IActivationLayer* sigmoid = network->addActivation(*input, nvinfer1::ActivationType::kSIGMOID);
        assert(sigmoid != nullptr);
        std::string sigmoidLayerName = "sigmoid_" + layerName + std::to_string(layerIdx);
        sigmoid->setName(sigmoidLayerName.c_str());
        nvinfer1::IElementWiseLayer* silu = network->addElementWise(*input, *sigmoid->getOutput(0),
            nvinfer1::ElementWiseOperation::kPROD);
        assert(silu != nullptr);
        std::string siluLayerName = "silu_" + layerName + std::to_string(layerIdx);
        silu->setName(siluLayerName.c_str());
        output = silu->getOutput(0);
    }
    else if (activation == "hardsigmoid") {
        nvinfer1::IActivationLayer* hardsigmoid = network->addActivation(*input, nvinfer1::ActivationType::kHARD_SIGMOID);
        assert(hardsigmoid != nullptr);
        std::string hardsigmoidLayerName = "hardsigmoid_" + layerName + std::to_string(layerIdx);
        hardsigmoid->setName(hardsigmoidLayerName.c_str());
        hardsigmoid->setAlpha(1.0 / 6.0);
        hardsigmoid->setBeta(0.5);
        output = hardsigmoid->getOutput(0);
    }
    else if (activation == "hardswish") {
        nvinfer1::IActivationLayer* hardsigmoid = network->addActivation(*input, nvinfer1::ActivationType::kHARD_SIGMOID);
        assert(hardsigmoid != nullptr);
        std::string hardsigmoidLayerName = "hardsigmoid_" + layerName + std::to_string(layerIdx);
        hardsigmoid->setName(hardsigmoidLayerName.c_str());
        hardsigmoid->setAlpha(1.0 / 6.0);
        hardsigmoid->setBeta(0.5);
        nvinfer1::IElementWiseLayer* hardswish = network->addElementWise(*input, *hardsigmoid->getOutput(0),
            nvinfer1::ElementWiseOperation::kPROD);
        assert(hardswish != nullptr);
        std::string hardswishLayerName = "hardswish_" + layerName + std::to_string(layerIdx);
        hardswish->setName(hardswishLayerName.c_str());
        output = hardswish->getOutput(0);
    }
    else {
        std::cerr << "Activation not supported: " << activation << std::endl;
        assert(0);
    }

    return output;
}
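The mish and silu branches above have no single activation enum in this builder, so they are composed from primitives: mish as x * tanh(softplus(x)) via a softplus, a tanh and an element-wise product, and silu as x * sigmoid(x). A minimal standalone sketch of the math those subgraphs compute (illustrative values only, not part of the commit):

#include <cmath>
#include <cstdio>

// Reference math for the composed activations: mish(x) = x * tanh(softplus(x)),
// silu(x) = x * sigmoid(x); log1p(exp(x)) is softplus.
static float mishRef(float x) { return x * std::tanh(std::log1p(std::exp(x))); }
static float siluRef(float x) { return x / (1.0f + std::exp(-x)); }

int main() {
    for (float x : {-2.0f, 0.0f, 2.0f})
        std::printf("x=% .1f  mish=% .4f  silu=% .4f\n", x, mishRef(x), siluRef(x));
    return 0;
}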
nvdsinfer_custom_impl_Yolo/layers/activation_layer.h
@@ -6,15 +6,11 @@

#ifndef __ACTIVATION_LAYER_H__
#define __ACTIVATION_LAYER_H__

#include <string>

#include "NvInfer.h"

nvinfer1::ITensor* activationLayer(int layerIdx, std::string activation, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network, std::string layerName = "");

#endif
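The new trailing layerName parameter defaults to an empty string, so existing callers keep the old generated names, while nested blocks such as c2f pass a prefix to keep TensorRT layer names unique. A small sketch of the resulting names (the prefix value is just an example):

#include <cstdio>
#include <string>

int main() {
    int layerIdx = 42;
    std::string layerName = "";  // default: old behaviour
    std::printf("%s\n", ("relu_" + layerName + std::to_string(layerIdx)).c_str());  // relu_42
    layerName = "c2f_1_1_";      // example prefix from a nested block
    std::printf("%s\n", ("relu_" + layerName + std::to_string(layerIdx)).c_str());  // relu_c2f_1_1_42
    return 0;
}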
nvdsinfer_custom_impl_Yolo/layers/batchnorm_layer.cpp
@@ -3,108 +3,94 @@

 * https://www.github.com/marcoslucianops
 */

#include "batchnorm_layer.h"

#include <cassert>
#include <math.h>

nvinfer1::ITensor*
batchnormLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, float eps, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;

    assert(block.at("type") == "batchnorm");
    assert(block.find("filters") != block.end());

    int filters = std::stoi(block.at("filters"));
    std::string activation = block.at("activation");

    std::vector<float> bnBiases;
    std::vector<float> bnWeights;
    std::vector<float> bnRunningMean;
    std::vector<float> bnRunningVar;

    if (weightsType == "weights") {
        for (int i = 0; i < filters; ++i) {
            bnBiases.push_back(weights[weightPtr]);
            ++weightPtr;
        }
        for (int i = 0; i < filters; ++i) {
            bnWeights.push_back(weights[weightPtr]);
            ++weightPtr;
        }
        for (int i = 0; i < filters; ++i) {
            bnRunningMean.push_back(weights[weightPtr]);
            ++weightPtr;
        }
        for (int i = 0; i < filters; ++i) {
            bnRunningVar.push_back(sqrt(weights[weightPtr] + 1.0e-5));
            ++weightPtr;
        }
    }
    else {
        for (int i = 0; i < filters; ++i) {
            bnWeights.push_back(weights[weightPtr]);
            ++weightPtr;
        }
        for (int i = 0; i < filters; ++i) {
            bnBiases.push_back(weights[weightPtr]);
            ++weightPtr;
        }
        for (int i = 0; i < filters; ++i) {
            bnRunningMean.push_back(weights[weightPtr]);
            ++weightPtr;
        }
        for (int i = 0; i < filters; ++i) {
            bnRunningVar.push_back(sqrt(weights[weightPtr] + eps));
            ++weightPtr;
        }
    }

    int size = filters;
    nvinfer1::Weights shift {nvinfer1::DataType::kFLOAT, nullptr, size};
    nvinfer1::Weights scale {nvinfer1::DataType::kFLOAT, nullptr, size};
    nvinfer1::Weights power {nvinfer1::DataType::kFLOAT, nullptr, size};
    float* shiftWt = new float[size];
    for (int i = 0; i < size; ++i)
        shiftWt[i] = bnBiases.at(i) - ((bnRunningMean.at(i) * bnWeights.at(i)) / bnRunningVar.at(i));
    shift.values = shiftWt;
    float* scaleWt = new float[size];
    for (int i = 0; i < size; ++i)
        scaleWt[i] = bnWeights.at(i) / bnRunningVar[i];
    scale.values = scaleWt;
    float* powerWt = new float[size];
    for (int i = 0; i < size; ++i)
        powerWt[i] = 1.0;
    power.values = powerWt;
    trtWeights.push_back(shift);
    trtWeights.push_back(scale);
    trtWeights.push_back(power);

    nvinfer1::IScaleLayer* batchnorm = network->addScale(*input, nvinfer1::ScaleMode::kCHANNEL, shift, scale, power);
    assert(batchnorm != nullptr);
    std::string batchnormLayerName = "batchnorm_" + std::to_string(layerIdx);
    batchnorm->setName(batchnormLayerName.c_str());
    output = batchnorm->getOutput(0);

    output = activationLayer(layerIdx, activation, output, network);
    assert(output != nullptr);

    return output;
}
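The IScaleLayer above applies y = (x * scale + shift)^power per channel; with power fixed to 1, scale = gamma / sqrt(var + eps) and shift = beta - mean * scale, which is exactly inference-time batch normalization (note that bnRunningVar already holds sqrt(var + eps) by the time the scale weights are built). A quick numeric check of the folding, with made-up values:

#include <cmath>
#include <cstdio>

int main() {
    // Made-up per-channel statistics and affine parameters.
    float x = 0.7f, gamma = 1.2f, beta = 0.3f, mean = 0.5f, var = 0.04f, eps = 1.0e-5f;
    float sd = std::sqrt(var + eps);                               // what bnRunningVar stores
    float textbook = gamma * (x - mean) / sd + beta;               // classic batch-norm formula
    float folded = x * (gamma / sd) + (beta - mean * gamma / sd);  // scale/shift form used above
    std::printf("textbook=%f folded=%f\n", textbook, folded);      // both agree
    return 0;
}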
nvdsinfer_custom_impl_Yolo/layers/batchnorm_layer.h
@@ -13,15 +13,8 @@

#include "activation_layer.h"

nvinfer1::ITensor* batchnormLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, float eps, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif
nvdsinfer_custom_impl_Yolo/layers/c2f_layer.cpp (new file, 82 lines)
@@ -0,0 +1,82 @@

/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "c2f_layer.h"

#include <cassert>

#include "convolutional_layer.h"

nvinfer1::ITensor*
c2fLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, float eps, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;

    assert(block.at("type") == "c2f");
    assert(block.find("n") != block.end());
    assert(block.find("shortcut") != block.end());
    assert(block.find("filters") != block.end());

    int n = std::stoi(block.at("n"));
    bool shortcut = (block.at("shortcut") == "1");
    int filters = std::stoi(block.at("filters"));

    nvinfer1::Dims inputDims = input->getDimensions();

    nvinfer1::ISliceLayer* sliceLt = network->addSlice(*input, nvinfer1::Dims{3, {0, 0, 0}},
        nvinfer1::Dims{3, {inputDims.d[0] / 2, inputDims.d[1], inputDims.d[2]}}, nvinfer1::Dims{3, {1, 1, 1}});
    assert(sliceLt != nullptr);
    std::string sliceLtLayerName = "slice_lt_" + std::to_string(layerIdx);
    sliceLt->setName(sliceLtLayerName.c_str());
    nvinfer1::ITensor* lt = sliceLt->getOutput(0);

    nvinfer1::ISliceLayer* sliceRb = network->addSlice(*input, nvinfer1::Dims{3, {inputDims.d[0] / 2, 0, 0}},
        nvinfer1::Dims{3, {inputDims.d[0] / 2, inputDims.d[1], inputDims.d[2]}}, nvinfer1::Dims{3, {1, 1, 1}});
    assert(sliceRb != nullptr);
    std::string sliceRbLayerName = "slice_rb_" + std::to_string(layerIdx);
    sliceRb->setName(sliceRbLayerName.c_str());
    nvinfer1::ITensor* rb = sliceRb->getOutput(0);

    std::vector<nvinfer1::ITensor*> concatInputs;
    concatInputs.push_back(lt);
    concatInputs.push_back(rb);
    output = rb;

    for (int i = 0; i < n; ++i) {
        std::string cv1MlayerName = "c2f_1_" + std::to_string(i + 1) + "_";
        nvinfer1::ITensor* cv1M = convolutionalLayer(layerIdx, block, weights, trtWeights, weightPtr, weightsType, filters, eps,
            output, network, cv1MlayerName);
        assert(cv1M != nullptr);

        std::string cv2MlayerName = "c2f_2_" + std::to_string(i + 1) + "_";
        nvinfer1::ITensor* cv2M = convolutionalLayer(layerIdx, block, weights, trtWeights, weightPtr, weightsType, filters, eps,
            cv1M, network, cv2MlayerName);
        assert(cv2M != nullptr);

        if (shortcut) {
            nvinfer1::IElementWiseLayer* ew = network->addElementWise(*rb, *cv2M, nvinfer1::ElementWiseOperation::kSUM);
            assert(ew != nullptr);
            std::string ewLayerName = "shortcut_c2f_" + std::to_string(i + 1) + "_" + std::to_string(layerIdx);
            ew->setName(ewLayerName.c_str());
            output = ew->getOutput(0);
            concatInputs.push_back(output);
        }
        else {
            output = cv2M;
            concatInputs.push_back(output);
        }
    }

    nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
    assert(concat != nullptr);
    std::string concatLayerName = "route_" + std::to_string(layerIdx);
    concat->setName(concatLayerName.c_str());
    concat->setAxis(0);
    output = concat->getOutput(0);

    return output;
}
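The c2f block mirrors YOLOv8's C2f module: the input is split channel-wise into two halves, n pairs of convolutions run on the running output, and the two halves plus the n branch outputs are concatenated on the channel axis. A back-of-the-envelope sketch of the concatenated width, assuming each inner convolution produces filters output channels and using arbitrary example sizes:

#include <cstdio>

int main() {
    int inputChannels = 128, filters = 64, n = 2;  // arbitrary example sizes
    // Two C/2 slices plus one filters-channel tensor per bottleneck iteration.
    int concatChannels = inputChannels / 2 + inputChannels / 2 + n * filters;
    std::printf("concat output channels: %d\n", concatChannels);  // 256 here
    return 0;
}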
nvdsinfer_custom_impl_Yolo/layers/c2f_layer.h (new file, 18 lines)
@@ -0,0 +1,18 @@

/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __C2F_LAYER_H__
#define __C2F_LAYER_H__

#include <map>
#include <vector>

#include "NvInfer.h"

nvinfer1::ITensor* c2fLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, float eps, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif
nvdsinfer_custom_impl_Yolo/layers/channels_layer.cpp
@@ -5,33 +5,32 @@

#include "channels_layer.h"

#include <cassert>

nvinfer1::ITensor*
channelsLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    nvinfer1::ITensor* implicitTensor, nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;

    assert(block.at("type") == "shift_channels" || block.at("type") == "control_channels");

    if (block.at("type") == "shift_channels") {
        nvinfer1::IElementWiseLayer* shift = network->addElementWise(*input, *implicitTensor,
            nvinfer1::ElementWiseOperation::kSUM);
        assert(shift != nullptr);
        std::string shiftLayerName = "shift_channels_" + std::to_string(layerIdx);
        shift->setName(shiftLayerName.c_str());
        output = shift->getOutput(0);
    }
    else if (block.at("type") == "control_channels") {
        nvinfer1::IElementWiseLayer* control = network->addElementWise(*input, *implicitTensor,
            nvinfer1::ElementWiseOperation::kPROD);
        assert(control != nullptr);
        std::string controlLayerName = "control_channels_" + std::to_string(layerIdx);
        control->setName(controlLayerName.c_str());
        output = control->getOutput(0);
    }

    return output;
}
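shift_channels and control_channels pair a feature map with an implicit tensor, the learned (filters, 1, 1) constant built by implicitLayer later in this commit, broadcasting it over height and width. Per channel this reduces to a scalar add or multiply, sketched here with invented values:

#include <cstdio>

int main() {
    float feature = 0.8f;      // one activation in channel c
    float implicitAdd = 0.1f;  // learned constant for channel c (invented)
    float implicitMul = 1.05f;
    std::printf("shift_channels:   %f\n", feature + implicitAdd);  // kSUM path
    std::printf("control_channels: %f\n", feature * implicitMul);  // kPROD path
    return 0;
}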
nvdsinfer_custom_impl_Yolo/layers/channels_layer.h
@@ -7,15 +7,10 @@

#define __CHANNELS_LAYER_H__

#include <map>

#include "NvInfer.h"

nvinfer1::ITensor* channelsLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    nvinfer1::ITensor* implicitTensor, nvinfer1::INetworkDefinition* network);

#endif
nvdsinfer_custom_impl_Yolo/layers/cls_layer.cpp
@@ -5,25 +5,25 @@

#include "cls_layer.h"

#include <cassert>

nvinfer1::ITensor*
clsLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;

    assert(block.at("type") == "cls");

    nvinfer1::IShuffleLayer* shuffle = network->addShuffle(*input);
    assert(shuffle != nullptr);
    std::string shuffleLayerName = "shuffle_" + std::to_string(layerIdx);
    shuffle->setName(shuffleLayerName.c_str());
    nvinfer1::Permutation permutation;
    permutation.order[0] = 1;
    permutation.order[1] = 0;
    shuffle->setFirstTranspose(permutation);
    output = shuffle->getOutput(0);

    return output;
}
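The cls block is just a transpose of the first two axes, so a (classes, anchors) tensor becomes (anchors, classes) and each row holds one prediction. An index-level sketch with a tiny array:

#include <cstdio>

int main() {
    float in[2][3] = {{1, 2, 3}, {4, 5, 6}};  // shape (2, 3)
    float out[3][2];                          // shape (3, 2) after the permutation {1, 0}
    for (int i = 0; i < 3; ++i)
        for (int j = 0; j < 2; ++j)
            out[i][j] = in[j][i];
    std::printf("out[2][1] = %g\n", out[2][1]);  // 6
    return 0;
}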
nvdsinfer_custom_impl_Yolo/layers/cls_layer.h
@@ -7,14 +7,10 @@

#define __CLS_LAYER_H__

#include <map>

#include "NvInfer.h"

nvinfer1::ITensor* clsLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif
nvdsinfer_custom_impl_Yolo/layers/convolutional_layer.cpp
@@ -3,224 +3,197 @@

 * https://www.github.com/marcoslucianops
 */

#include "convolutional_layer.h"

#include <cassert>
#include <math.h>

nvinfer1::ITensor*
convolutionalLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, int& inputChannels, float eps,
    nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network, std::string layerName)
{
    nvinfer1::ITensor* output;

    assert(block.at("type") == "convolutional" || block.at("type") == "c2f");
    assert(block.find("filters") != block.end());
    assert(block.find("pad") != block.end());
    assert(block.find("size") != block.end());
    assert(block.find("stride") != block.end());

    int filters = std::stoi(block.at("filters"));
    int padding = std::stoi(block.at("pad"));
    int kernelSize = std::stoi(block.at("size"));
    int stride = std::stoi(block.at("stride"));
    std::string activation = block.at("activation");
    int bias = filters;

    bool batchNormalize = false;
    if (block.find("batch_normalize") != block.end()) {
        bias = 0;
        batchNormalize = (block.at("batch_normalize") == "1");
    }

    int groups = 1;
    if (block.find("groups") != block.end())
        groups = std::stoi(block.at("groups"));

    if (block.find("bias") != block.end())
        bias = std::stoi(block.at("bias"));

    int pad;
    if (padding)
        pad = (kernelSize - 1) / 2;
    else
        pad = 0;

    int size = filters * inputChannels * kernelSize * kernelSize / groups;
    std::vector<float> bnBiases;
    std::vector<float> bnWeights;
    std::vector<float> bnRunningMean;
    std::vector<float> bnRunningVar;
    nvinfer1::Weights convWt {nvinfer1::DataType::kFLOAT, nullptr, size};
    nvinfer1::Weights convBias {nvinfer1::DataType::kFLOAT, nullptr, bias};

    if (weightsType == "weights") {
        if (batchNormalize == false) {
            float* val;
            if (bias != 0) {
                val = new float[filters];
                for (int i = 0; i < filters; ++i) {
                    val[i] = weights[weightPtr];
                    ++weightPtr;
                }
                convBias.values = val;
                trtWeights.push_back(convBias);
            }
            val = new float[size];
            for (int i = 0; i < size; ++i) {
                val[i] = weights[weightPtr];
                ++weightPtr;
            }
            convWt.values = val;
            trtWeights.push_back(convWt);
        }
        else {
            for (int i = 0; i < filters; ++i) {
                bnBiases.push_back(weights[weightPtr]);
                ++weightPtr;
            }
            for (int i = 0; i < filters; ++i) {
                bnWeights.push_back(weights[weightPtr]);
                ++weightPtr;
            }
            for (int i = 0; i < filters; ++i) {
                bnRunningMean.push_back(weights[weightPtr]);
                ++weightPtr;
            }
            for (int i = 0; i < filters; ++i) {
                bnRunningVar.push_back(sqrt(weights[weightPtr] + 1.0e-5));
                ++weightPtr;
            }
            float* val = new float[size];
            for (int i = 0; i < size; ++i) {
                val[i] = weights[weightPtr];
                ++weightPtr;
            }
            convWt.values = val;
            trtWeights.push_back(convWt);
            if (bias != 0)
                trtWeights.push_back(convBias);
        }
    }
    else {
        if (batchNormalize == false) {
            float* val = new float[size];
            for (int i = 0; i < size; ++i) {
                val[i] = weights[weightPtr];
                ++weightPtr;
            }
            convWt.values = val;
            trtWeights.push_back(convWt);
            if (bias != 0) {
                val = new float[filters];
                for (int i = 0; i < filters; ++i) {
                    val[i] = weights[weightPtr];
                    ++weightPtr;
                }
                convBias.values = val;
                trtWeights.push_back(convBias);
            }
        }
        else {
            float* val = new float[size];
            for (int i = 0; i < size; ++i) {
                val[i] = weights[weightPtr];
                ++weightPtr;
            }
            convWt.values = val;
            for (int i = 0; i < filters; ++i) {
                bnWeights.push_back(weights[weightPtr]);
                ++weightPtr;
            }
            for (int i = 0; i < filters; ++i) {
                bnBiases.push_back(weights[weightPtr]);
                ++weightPtr;
            }
            for (int i = 0; i < filters; ++i) {
                bnRunningMean.push_back(weights[weightPtr]);
                ++weightPtr;
            }
            for (int i = 0; i < filters; ++i) {
                bnRunningVar.push_back(sqrt(weights[weightPtr] + eps));
                ++weightPtr;
            }
            trtWeights.push_back(convWt);
            if (bias != 0)
                trtWeights.push_back(convBias);
        }
    }

    nvinfer1::IConvolutionLayer* conv = network->addConvolutionNd(*input, filters, nvinfer1::Dims{2, {kernelSize, kernelSize}},
        convWt, convBias);
    assert(conv != nullptr);
    std::string convLayerName = "conv_" + layerName + std::to_string(layerIdx);
    conv->setName(convLayerName.c_str());
    conv->setStrideNd(nvinfer1::Dims{2, {stride, stride}});
    conv->setPaddingNd(nvinfer1::Dims{2, {pad, pad}});

    if (block.find("groups") != block.end())
        conv->setNbGroups(groups);

    output = conv->getOutput(0);

    if (batchNormalize == true) {
        size = filters;
        nvinfer1::Weights shift {nvinfer1::DataType::kFLOAT, nullptr, size};
        nvinfer1::Weights scale {nvinfer1::DataType::kFLOAT, nullptr, size};
        nvinfer1::Weights power {nvinfer1::DataType::kFLOAT, nullptr, size};
        float* shiftWt = new float[size];
        for (int i = 0; i < size; ++i)
            shiftWt[i] = bnBiases.at(i) - ((bnRunningMean.at(i) * bnWeights.at(i)) / bnRunningVar.at(i));
        shift.values = shiftWt;
        float* scaleWt = new float[size];
        for (int i = 0; i < size; ++i)
            scaleWt[i] = bnWeights.at(i) / bnRunningVar[i];
        scale.values = scaleWt;
        float* powerWt = new float[size];
        for (int i = 0; i < size; ++i)
            powerWt[i] = 1.0;
        power.values = powerWt;
        trtWeights.push_back(shift);
        trtWeights.push_back(scale);
        trtWeights.push_back(power);

        nvinfer1::IScaleLayer* batchnorm = network->addScale(*output, nvinfer1::ScaleMode::kCHANNEL, shift, scale, power);
        assert(batchnorm != nullptr);
        std::string batchnormLayerName = "batchnorm_" + layerName + std::to_string(layerIdx);
        batchnorm->setName(batchnormLayerName.c_str());
        output = batchnorm->getOutput(0);
    }

    output = activationLayer(layerIdx, activation, output, network, layerName);
    assert(output != nullptr);

    return output;
}
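The two weightsType branches consume the same flat float array in different orders: the "weights" (darknet-style) branch reads the batch-norm vectors (biases, weights, running mean, running var) before the convolution kernels, while the other branch reads the kernels first and orders the batch-norm vectors differently. A counting sketch of the darknet-style layout for one conv+bn block, with arbitrary sizes:

#include <cstdio>

int main() {
    int filters = 32, inputChannels = 16, kernelSize = 3, groups = 1;  // arbitrary sizes
    int convCount = filters * inputChannels * kernelSize * kernelSize / groups;
    int ptr = 0;
    ptr += filters;   // bn biases
    ptr += filters;   // bn weights
    ptr += filters;   // bn running mean
    ptr += filters;   // bn running var
    ptr += convCount; // convolution kernels
    std::printf("floats consumed for this block: %d\n", ptr);
    return 0;
}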
nvdsinfer_custom_impl_Yolo/layers/convolutional_layer.h
@@ -13,16 +13,8 @@

#include "activation_layer.h"

nvinfer1::ITensor* convolutionalLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, int& inputChannels, float eps,
    nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network, std::string layerName = "");

#endif
nvdsinfer_custom_impl_Yolo/layers/detect_v8_layer.cpp (new file, 196 lines)
@@ -0,0 +1,196 @@

/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "detect_v8_layer.h"

#include <cassert>

nvinfer1::ITensor*
detectV8Layer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;

    assert(block.at("type") == "detect_v8");
    assert(block.find("num") != block.end());
    assert(block.find("classes") != block.end());

    int num = std::stoi(block.at("num"));
    int classes = std::stoi(block.at("classes"));
    int reg_max = num / 4;

    nvinfer1::Dims inputDims = input->getDimensions();

    nvinfer1::ISliceLayer* sliceBox = network->addSlice(*input, nvinfer1::Dims{2, {0, 0}},
        nvinfer1::Dims{2, {num, inputDims.d[1]}}, nvinfer1::Dims{2, {1, 1}});
    assert(sliceBox != nullptr);
    std::string sliceBoxLayerName = "slice_box_" + std::to_string(layerIdx);
    sliceBox->setName(sliceBoxLayerName.c_str());
    nvinfer1::ITensor* box = sliceBox->getOutput(0);

    nvinfer1::ISliceLayer* sliceCls = network->addSlice(*input, nvinfer1::Dims{2, {num, 0}},
        nvinfer1::Dims{2, {classes, inputDims.d[1]}}, nvinfer1::Dims{2, {1, 1}});
    assert(sliceCls != nullptr);
    std::string sliceClsLayerName = "slice_cls_" + std::to_string(layerIdx);
    sliceCls->setName(sliceClsLayerName.c_str());
    nvinfer1::ITensor* cls = sliceCls->getOutput(0);

    nvinfer1::IShuffleLayer* shuffle1Box = network->addShuffle(*box);
    assert(shuffle1Box != nullptr);
    std::string shuffle1BoxLayerName = "shuffle1_box_" + std::to_string(layerIdx);
    shuffle1Box->setName(shuffle1BoxLayerName.c_str());
    nvinfer1::Dims reshape1Dims = {3, {4, reg_max, inputDims.d[1]}};
    shuffle1Box->setReshapeDimensions(reshape1Dims);
    nvinfer1::Permutation permutation1;
    permutation1.order[0] = 1;
    permutation1.order[1] = 0;
    permutation1.order[2] = 2;
    shuffle1Box->setSecondTranspose(permutation1);
    box = shuffle1Box->getOutput(0);

    nvinfer1::ISoftMaxLayer* softmax = network->addSoftMax(*box);
    assert(softmax != nullptr);
    std::string softmaxLayerName = "softmax_box_" + std::to_string(layerIdx);
    softmax->setName(softmaxLayerName.c_str());
    softmax->setAxes(1 << 0);
    box = softmax->getOutput(0);

    nvinfer1::Weights dflWt {nvinfer1::DataType::kFLOAT, nullptr, reg_max};

    float* val = new float[reg_max];
    for (int i = 0; i < reg_max; ++i) {
        val[i] = i;
    }
    dflWt.values = val;

    nvinfer1::IConvolutionLayer* conv = network->addConvolutionNd(*box, 1, nvinfer1::Dims{2, {1, 1}}, dflWt,
        nvinfer1::Weights{});
    assert(conv != nullptr);
    std::string convLayerName = "conv_box_" + std::to_string(layerIdx);
    conv->setName(convLayerName.c_str());
    conv->setStrideNd(nvinfer1::Dims{2, {1, 1}});
    conv->setPaddingNd(nvinfer1::Dims{2, {0, 0}});
    box = conv->getOutput(0);

    nvinfer1::IShuffleLayer* shuffle2Box = network->addShuffle(*box);
    assert(shuffle2Box != nullptr);
    std::string shuffle2BoxLayerName = "shuffle2_box_" + std::to_string(layerIdx);
    shuffle2Box->setName(shuffle2BoxLayerName.c_str());
    nvinfer1::Dims reshape2Dims = {2, {4, inputDims.d[1]}};
    shuffle2Box->setReshapeDimensions(reshape2Dims);
    box = shuffle2Box->getOutput(0);

    nvinfer1::Dims shuffle2BoxDims = box->getDimensions();

    nvinfer1::ISliceLayer* sliceLtBox = network->addSlice(*box, nvinfer1::Dims{2, {0, 0}},
        nvinfer1::Dims{2, {2, shuffle2BoxDims.d[1]}}, nvinfer1::Dims{2, {1, 1}});
    assert(sliceLtBox != nullptr);
    std::string sliceLtBoxLayerName = "slice_lt_box_" + std::to_string(layerIdx);
    sliceLtBox->setName(sliceLtBoxLayerName.c_str());
    nvinfer1::ITensor* lt = sliceLtBox->getOutput(0);

    nvinfer1::ISliceLayer* sliceRbBox = network->addSlice(*box, nvinfer1::Dims{2, {2, 0}},
        nvinfer1::Dims{2, {2, shuffle2BoxDims.d[1]}}, nvinfer1::Dims{2, {1, 1}});
    assert(sliceRbBox != nullptr);
    std::string sliceRbBoxLayerName = "slice_rb_box_" + std::to_string(layerIdx);
    sliceRbBox->setName(sliceRbBoxLayerName.c_str());
    nvinfer1::ITensor* rb = sliceRbBox->getOutput(0);

    int channels = 2 * shuffle2BoxDims.d[1];
    nvinfer1::Weights anchorPointsWt {nvinfer1::DataType::kFLOAT, nullptr, channels};
    val = new float[channels];
    for (int i = 0; i < channels; ++i) {
        val[i] = weights[weightPtr];
        ++weightPtr;
    }
    anchorPointsWt.values = val;
    trtWeights.push_back(anchorPointsWt);

    nvinfer1::IConstantLayer* anchorPoints = network->addConstant(nvinfer1::Dims{2, {2, shuffle2BoxDims.d[1]}},
        anchorPointsWt);
    assert(anchorPoints != nullptr);
    std::string anchorPointsLayerName = "anchor_points_" + std::to_string(layerIdx);
    anchorPoints->setName(anchorPointsLayerName.c_str());
    nvinfer1::ITensor* anchorPointsTensor = anchorPoints->getOutput(0);

    nvinfer1::IElementWiseLayer* x1y1 = network->addElementWise(*anchorPointsTensor, *lt,
        nvinfer1::ElementWiseOperation::kSUB);
    assert(x1y1 != nullptr);
    std::string x1y1LayerName = "x1y1_" + std::to_string(layerIdx);
    x1y1->setName(x1y1LayerName.c_str());
    nvinfer1::ITensor* x1y1Tensor = x1y1->getOutput(0);

    nvinfer1::IElementWiseLayer* x2y2 = network->addElementWise(*rb, *anchorPointsTensor,
        nvinfer1::ElementWiseOperation::kSUM);
    assert(x2y2 != nullptr);
    std::string x2y2LayerName = "x2y2_" + std::to_string(layerIdx);
    x2y2->setName(x2y2LayerName.c_str());
    nvinfer1::ITensor* x2y2Tensor = x2y2->getOutput(0);

    std::vector<nvinfer1::ITensor*> concatBoxInputs;
    concatBoxInputs.push_back(x1y1Tensor);
    concatBoxInputs.push_back(x2y2Tensor);

    nvinfer1::IConcatenationLayer* concatBox = network->addConcatenation(concatBoxInputs.data(), concatBoxInputs.size());
    assert(concatBox != nullptr);
    std::string concatBoxLayerName = "concat_box_" + std::to_string(layerIdx);
    concatBox->setName(concatBoxLayerName.c_str());
    concatBox->setAxis(0);
    box = concatBox->getOutput(0);

    channels = shuffle2BoxDims.d[1];
    nvinfer1::Weights stridePointsWt {nvinfer1::DataType::kFLOAT, nullptr, channels};
    val = new float[channels];
    for (int i = 0; i < channels; ++i) {
        val[i] = weights[weightPtr];
        ++weightPtr;
    }
    stridePointsWt.values = val;
    trtWeights.push_back(stridePointsWt);

    nvinfer1::IConstantLayer* stridePoints = network->addConstant(nvinfer1::Dims{2, {1, shuffle2BoxDims.d[1]}},
        stridePointsWt);
    assert(stridePoints != nullptr);
    std::string stridePointsLayerName = "stride_points_" + std::to_string(layerIdx);
    stridePoints->setName(stridePointsLayerName.c_str());
    nvinfer1::ITensor* stridePointsTensor = stridePoints->getOutput(0);

    nvinfer1::IElementWiseLayer* pred = network->addElementWise(*box, *stridePointsTensor,
        nvinfer1::ElementWiseOperation::kPROD);
    assert(pred != nullptr);
    std::string predLayerName = "pred_" + std::to_string(layerIdx);
    pred->setName(predLayerName.c_str());
    box = pred->getOutput(0);

    nvinfer1::IActivationLayer* sigmoid = network->addActivation(*cls, nvinfer1::ActivationType::kSIGMOID);
    assert(sigmoid != nullptr);
    std::string sigmoidLayerName = "sigmoid_cls_" + std::to_string(layerIdx);
    sigmoid->setName(sigmoidLayerName.c_str());
    cls = sigmoid->getOutput(0);

    std::vector<nvinfer1::ITensor*> concatInputs;
    concatInputs.push_back(box);
    concatInputs.push_back(cls);

    nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
    assert(concat != nullptr);
    std::string concatLayerName = "concat_" + std::to_string(layerIdx);
    concat->setName(concatLayerName.c_str());
    concat->setAxis(0);
    output = concat->getOutput(0);

    nvinfer1::IShuffleLayer* shuffle = network->addShuffle(*output);
    assert(shuffle != nullptr);
    std::string shuffleLayerName = "shuffle_" + std::to_string(layerIdx);
    shuffle->setName(shuffleLayerName.c_str());
    nvinfer1::Permutation permutation2;
    permutation2.order[0] = 1;
    permutation2.order[1] = 0;
    shuffle->setFirstTranspose(permutation2);
    output = shuffle->getOutput(0);

    return output;
}
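The box path above is YOLOv8's DFL decode: the 4*reg_max box channels are reshaped into 4 sides of reg_max bins, softmaxed over the bin axis, and reduced with a 1x1 convolution whose fixed weights are {0, 1, ..., reg_max-1}, which computes the expected bin index, i.e. a distance per side. The two element-wise layers then turn the (l, t, r, b) distances into corners around the precomputed anchor points, scaled by the per-position stride. A scalar sketch of both steps, with invented numbers:

#include <cmath>
#include <cstdio>

int main() {
    // 1) DFL: expectation over reg_max softmaxed bins (logit peak placed at bin 5).
    const int regMax = 16;
    float probs[regMax], sum = 0.0f, dist = 0.0f;
    for (int i = 0; i < regMax; ++i) { probs[i] = std::exp(i == 5 ? 4.0f : 0.0f); sum += probs[i]; }
    for (int i = 0; i < regMax; ++i) dist += i * (probs[i] / sum);  // the 1x1 conv with weights 0..15
    std::printf("decoded distance ~= %f\n", dist);                  // dominated by bin 5

    // 2) Box decode: x1 = (anchor - l) * stride, x2 = (anchor + r) * stride.
    float anchorX = 10.5f, l = dist, r = 3.0f, stride = 8.0f;       // invented values
    std::printf("x1=%f x2=%f\n", (anchorX - l) * stride, (anchorX + r) * stride);
    return 0;
}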
nvdsinfer_custom_impl_Yolo/layers/detect_v8_layer.h (new file, 18 lines)
@@ -0,0 +1,18 @@

/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __DETECT_V8_LAYER_H__
#define __DETECT_V8_LAYER_H__

#include <map>
#include <vector>

#include "NvInfer.h"

nvinfer1::ITensor* detectV8Layer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif
nvdsinfer_custom_impl_Yolo/layers/implicit_layer.cpp
@@ -5,37 +5,34 @@

#include "implicit_layer.h"

#include <cassert>

nvinfer1::ITensor*
implicitLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;

    assert(block.at("type") == "implicit_add" || block.at("type") == "implicit_mul");
    assert(block.find("filters") != block.end());

    int filters = std::stoi(block.at("filters"));

    nvinfer1::Weights convWt {nvinfer1::DataType::kFLOAT, nullptr, filters};

    float* val = new float[filters];
    for (int i = 0; i < filters; ++i) {
        val[i] = weights[weightPtr];
        ++weightPtr;
    }
    convWt.values = val;
    trtWeights.push_back(convWt);

    nvinfer1::IConstantLayer* implicit = network->addConstant(nvinfer1::Dims{3, {filters, 1, 1}}, convWt);
    assert(implicit != nullptr);
    std::string implicitLayerName = block.at("type") + "_" + std::to_string(layerIdx);
    implicit->setName(implicitLayerName.c_str());
    output = implicit->getOutput(0);

    return output;
}
nvdsinfer_custom_impl_Yolo/layers/implicit_layer.h
@@ -8,16 +8,10 @@

#include <map>
#include <vector>

#include "NvInfer.h"

nvinfer1::ITensor* implicitLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, nvinfer1::INetworkDefinition* network);

#endif
nvdsinfer_custom_impl_Yolo/layers/pooling_layer.cpp
@@ -5,53 +5,50 @@

#include "pooling_layer.h"

#include <cassert>
#include <iostream>

nvinfer1::ITensor*
poolingLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;

    assert(block.at("type") == "maxpool" || block.at("type") == "avgpool");

    if (block.at("type") == "maxpool") {
        assert(block.find("size") != block.end());
        assert(block.find("stride") != block.end());

        int size = std::stoi(block.at("size"));
        int stride = std::stoi(block.at("stride"));

        nvinfer1::IPoolingLayer* maxpool = network->addPoolingNd(*input, nvinfer1::PoolingType::kMAX,
            nvinfer1::Dims{2, {size, size}});
        assert(maxpool != nullptr);
        std::string maxpoolLayerName = "maxpool_" + std::to_string(layerIdx);
        maxpool->setName(maxpoolLayerName.c_str());
        maxpool->setStrideNd(nvinfer1::Dims{2, {stride, stride}});
        maxpool->setPaddingNd(nvinfer1::Dims{2, {(size - 1) / 2, (size - 1) / 2}});
        if (size == 2 && stride == 1) {
            maxpool->setPrePadding(nvinfer1::Dims{2, {0, 0}});
            maxpool->setPostPadding(nvinfer1::Dims{2, {1, 1}});
        }
        output = maxpool->getOutput(0);
    }
    else if (block.at("type") == "avgpool") {
        nvinfer1::Dims inputDims = input->getDimensions();
        nvinfer1::IPoolingLayer* avgpool = network->addPoolingNd(*input, nvinfer1::PoolingType::kAVERAGE,
            nvinfer1::Dims{2, {inputDims.d[1], inputDims.d[2]}});
        assert(avgpool != nullptr);
        std::string avgpoolLayerName = "avgpool_" + std::to_string(layerIdx);
        avgpool->setName(avgpoolLayerName.c_str());
        output = avgpool->getOutput(0);
    }
    else {
        std::cerr << "Pooling not supported: " << block.at("type") << std::endl;
        assert(0);
    }

    return output;
}
@@ -7,15 +7,10 @@
|
||||
#define __POOLING_LAYER_H__
|
||||
|
||||
#include <map>
|
||||
#include <cassert>
|
||||
#include <iostream>
|
||||
|
||||
#include "NvInfer.h"
|
||||
|
||||
nvinfer1::ITensor* poolingLayer(
|
||||
int layerIdx,
|
||||
std::map<std::string, std::string>& block,
|
||||
nvinfer1::ITensor* input,
|
||||
nvinfer1::ITensor* poolingLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
|
||||
nvinfer1::INetworkDefinition* network);
|
||||
|
||||
#endif
|
||||
|
||||
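Worth noting: the size == 2 && stride == 1 branch overrides the symmetric (size - 1) / 2 padding with pre 0 / post 1 so the Darknet "same-size" maxpool keeps its spatial dimensions. A standalone sketch (not part of this diff) of the output-size arithmetic, assuming TensorRT's round-down convention with explicit pre/post padding:

#include <iostream>

int pooledDim(int in, int size, int stride, int prePad, int postPad)
{
    return (in + prePad + postPad - size) / stride + 1;
}

int main()
{
    std::cout << pooledDim(416, 3, 2, 1, 1) << "\n";  // 208: symmetric (size - 1) / 2 padding
    std::cout << pooledDim(13, 2, 1, 0, 1) << "\n";   // 13: pre 0 / post 1 keeps the size
    return 0;
}
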
@@ -5,54 +5,50 @@

#include "reduce_layer.h"

nvinfer1::ITensor* reduceLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::ITensor*
reduceLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
nvinfer1::ITensor* output;

assert(block.at("type") == "reduce");
assert(block.find("mode") != block.end());
assert(block.find("axes") != block.end());
assert(block.at("type") == "reduce");
assert(block.find("mode") != block.end());
assert(block.find("axes") != block.end());

std::string mode = block.at("mode");
std::string mode = block.at("mode");

nvinfer1::ReduceOperation operation;
if (mode == "mean")
operation = nvinfer1::ReduceOperation::kAVG;
nvinfer1::ReduceOperation operation;
if (mode == "mean")
operation = nvinfer1::ReduceOperation::kAVG;

std::string strAxes = block.at("axes");
std::vector<int32_t> axes;
size_t lastPos = 0, pos = 0;
while ((pos = strAxes.find(',', lastPos)) != std::string::npos)
{
int vL = std::stoi(trim(strAxes.substr(lastPos, pos - lastPos)));
axes.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strAxes.length())
{
std::string lastV = trim(strAxes.substr(lastPos));
if (!lastV.empty())
axes.push_back(std::stoi(lastV));
}
assert(!axes.empty());

uint32_t axisMask = 0;
for (int axis : axes)
axisMask |= 1 << axis;

bool keepDims = false;
if (block.find("keep") != block.end())
keepDims = std::stoi(block.at("keep")) == 1 ? true : false;
std::string strAxes = block.at("axes");
std::vector<int32_t> axes;
size_t lastPos = 0, pos = 0;
while ((pos = strAxes.find(',', lastPos)) != std::string::npos) {
int vL = std::stoi(trim(strAxes.substr(lastPos, pos - lastPos)));
axes.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strAxes.length()) {
std::string lastV = trim(strAxes.substr(lastPos));
if (!lastV.empty())
axes.push_back(std::stoi(lastV));
}
assert(!axes.empty());

nvinfer1::IReduceLayer* reduce = network->addReduce(*input, operation, axisMask, keepDims);
assert(reduce != nullptr);
std::string reduceLayerName = "reduce_" + std::to_string(layerIdx);
reduce->setName(reduceLayerName.c_str());
output = reduce->getOutput(0);
uint32_t axisMask = 0;
for (int axis : axes)
axisMask |= 1 << axis;

return output;
bool keepDims = false;
if (block.find("keep") != block.end())
keepDims = std::stoi(block.at("keep")) == 1 ? true : false;

nvinfer1::IReduceLayer* reduce = network->addReduce(*input, operation, axisMask, keepDims);
assert(reduce != nullptr);
std::string reduceLayerName = "reduce_" + std::to_string(layerIdx);
reduce->setName(reduceLayerName.c_str());
output = reduce->getOutput(0);

return output;
}

@@ -6,13 +6,9 @@
#ifndef __REDUCE_LAYER_H__
#define __REDUCE_LAYER_H__

#include "NvInfer.h"
#include "../utils.h"

nvinfer1::ITensor* reduceLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::ITensor* reduceLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);

#endif
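The comma-separated "axes" string becomes a bitmask, one bit per reduced dimension, which is the form addReduce expects. A standalone sketch (not part of this diff): axes=1, 2 on a CHW tensor reduces over H and W:

#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    std::vector<int32_t> axes = {1, 2};  // parsed from axes=1, 2
    uint32_t axisMask = 0;
    for (int axis : axes)
        axisMask |= 1 << axis;
    std::cout << axisMask << "\n";       // 6 == 0b110, i.e. dims 1 and 2
    return 0;
}
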
@@ -5,109 +5,105 @@

#include "reg_layer.h"

nvinfer1::ITensor* regLayer(
int layerIdx,
std::map<std::string, std::string>& block,
std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights,
int& weightPtr,
nvinfer1::ITensor* input,
#include <cassert>

nvinfer1::ITensor*
regLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
nvinfer1::ITensor* output;

assert(block.at("type") == "reg");
assert(block.at("type") == "reg");

nvinfer1::IShuffleLayer* shuffle = network->addShuffle(*input);
assert(shuffle != nullptr);
std::string shuffleLayerName = "shuffle_" + std::to_string(layerIdx);
shuffle->setName(shuffleLayerName.c_str());
nvinfer1::Permutation permutation;
permutation.order[0] = 1;
permutation.order[1] = 0;
shuffle->setFirstTranspose(permutation);
output = shuffle->getOutput(0);
nvinfer1::Dims shuffleDims = output->getDimensions();
nvinfer1::IShuffleLayer* shuffle = network->addShuffle(*input);
assert(shuffle != nullptr);
std::string shuffleLayerName = "shuffle_" + std::to_string(layerIdx);
shuffle->setName(shuffleLayerName.c_str());
nvinfer1::Permutation permutation;
permutation.order[0] = 1;
permutation.order[1] = 0;
shuffle->setFirstTranspose(permutation);
output = shuffle->getOutput(0);
nvinfer1::Dims shuffleDims = output->getDimensions();

nvinfer1::ISliceLayer* sliceLt = network->addSlice(
*output, nvinfer1::Dims{2, {0, 0}}, nvinfer1::Dims{2, {shuffleDims.d[0], 2}}, nvinfer1::Dims{2, {1, 1}});
assert(sliceLt != nullptr);
std::string sliceLtLayerName = "slice_lt_" + std::to_string(layerIdx);
sliceLt->setName(sliceLtLayerName.c_str());
nvinfer1::ITensor* lt = sliceLt->getOutput(0);
nvinfer1::ISliceLayer* sliceLt = network->addSlice(*output, nvinfer1::Dims{2, {0, 0}},
nvinfer1::Dims{2, {shuffleDims.d[0], 2}}, nvinfer1::Dims{2, {1, 1}});
assert(sliceLt != nullptr);
std::string sliceLtLayerName = "slice_lt_" + std::to_string(layerIdx);
sliceLt->setName(sliceLtLayerName.c_str());
nvinfer1::ITensor* lt = sliceLt->getOutput(0);

nvinfer1::ISliceLayer* sliceRb = network->addSlice(
*output, nvinfer1::Dims{2, {0, 2}}, nvinfer1::Dims{2, {shuffleDims.d[0], 2}}, nvinfer1::Dims{2, {1, 1}});
assert(sliceRb != nullptr);
std::string sliceRbLayerName = "slice_rb_" + std::to_string(layerIdx);
sliceRb->setName(sliceRbLayerName.c_str());
nvinfer1::ITensor* rb = sliceRb->getOutput(0);
nvinfer1::ISliceLayer* sliceRb = network->addSlice(*output, nvinfer1::Dims{2, {0, 2}},
nvinfer1::Dims{2, {shuffleDims.d[0], 2}}, nvinfer1::Dims{2, {1, 1}});
assert(sliceRb != nullptr);
std::string sliceRbLayerName = "slice_rb_" + std::to_string(layerIdx);
sliceRb->setName(sliceRbLayerName.c_str());
nvinfer1::ITensor* rb = sliceRb->getOutput(0);

int channels = shuffleDims.d[0] * 2;
nvinfer1::Weights anchorPointsWt{nvinfer1::DataType::kFLOAT, nullptr, channels};
float* val = new float[channels];
for (int i = 0; i < channels; ++i)
{
val[i] = weights[weightPtr];
weightPtr++;
}
anchorPointsWt.values = val;
trtWeights.push_back(anchorPointsWt);
int channels = shuffleDims.d[0] * 2;
nvinfer1::Weights anchorPointsWt {nvinfer1::DataType::kFLOAT, nullptr, channels};
float* val = new float[channels];
for (int i = 0; i < channels; ++i) {
val[i] = weights[weightPtr];
++weightPtr;
}
anchorPointsWt.values = val;
trtWeights.push_back(anchorPointsWt);

nvinfer1::IConstantLayer* anchorPoints = network->addConstant(nvinfer1::Dims{2, {shuffleDims.d[0], 2}}, anchorPointsWt);
assert(anchorPoints != nullptr);
std::string anchorPointsLayerName = "anchor_points_" + std::to_string(layerIdx);
anchorPoints->setName(anchorPointsLayerName.c_str());
nvinfer1::ITensor* anchorPointsTensor = anchorPoints->getOutput(0);
nvinfer1::IConstantLayer* anchorPoints = network->addConstant(nvinfer1::Dims{2, {shuffleDims.d[0], 2}}, anchorPointsWt);
assert(anchorPoints != nullptr);
std::string anchorPointsLayerName = "anchor_points_" + std::to_string(layerIdx);
anchorPoints->setName(anchorPointsLayerName.c_str());
nvinfer1::ITensor* anchorPointsTensor = anchorPoints->getOutput(0);

nvinfer1::IElementWiseLayer* x1y1
= network->addElementWise(*anchorPointsTensor, *lt, nvinfer1::ElementWiseOperation::kSUB);
assert(x1y1 != nullptr);
std::string x1y1LayerName = "x1y1_" + std::to_string(layerIdx);
x1y1->setName(x1y1LayerName.c_str());
nvinfer1::ITensor* x1y1Tensor = x1y1->getOutput(0);
nvinfer1::IElementWiseLayer* x1y1 = network->addElementWise(*anchorPointsTensor, *lt,
nvinfer1::ElementWiseOperation::kSUB);
assert(x1y1 != nullptr);
std::string x1y1LayerName = "x1y1_" + std::to_string(layerIdx);
x1y1->setName(x1y1LayerName.c_str());
nvinfer1::ITensor* x1y1Tensor = x1y1->getOutput(0);

nvinfer1::IElementWiseLayer* x2y2
= network->addElementWise(*rb, *anchorPointsTensor, nvinfer1::ElementWiseOperation::kSUM);
assert(x2y2 != nullptr);
std::string x2y2LayerName = "x2y2_" + std::to_string(layerIdx);
x2y2->setName(x2y2LayerName.c_str());
nvinfer1::ITensor* x2y2Tensor = x2y2->getOutput(0);
nvinfer1::IElementWiseLayer* x2y2 = network->addElementWise(*rb, *anchorPointsTensor,
nvinfer1::ElementWiseOperation::kSUM);
assert(x2y2 != nullptr);
std::string x2y2LayerName = "x2y2_" + std::to_string(layerIdx);
x2y2->setName(x2y2LayerName.c_str());
nvinfer1::ITensor* x2y2Tensor = x2y2->getOutput(0);

std::vector<nvinfer1::ITensor*> concatInputs;
concatInputs.push_back(x1y1Tensor);
concatInputs.push_back(x2y2Tensor);
std::vector<nvinfer1::ITensor*> concatInputs;
concatInputs.push_back(x1y1Tensor);
concatInputs.push_back(x2y2Tensor);

nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
assert(concat != nullptr);
std::string concatLayerName = "concat_" + std::to_string(layerIdx);
concat->setName(concatLayerName.c_str());
concat->setAxis(1);
output = concat->getOutput(0);
nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
assert(concat != nullptr);
std::string concatLayerName = "concat_" + std::to_string(layerIdx);
concat->setName(concatLayerName.c_str());
concat->setAxis(1);
output = concat->getOutput(0);

channels = shuffleDims.d[0];
nvinfer1::Weights stridePointsWt{nvinfer1::DataType::kFLOAT, nullptr, channels};
val = new float[channels];
for (int i = 0; i < channels; ++i)
{
val[i] = weights[weightPtr];
weightPtr++;
}
stridePointsWt.values = val;
trtWeights.push_back(stridePointsWt);
channels = shuffleDims.d[0];
nvinfer1::Weights stridePointsWt {nvinfer1::DataType::kFLOAT, nullptr, channels};
val = new float[channels];
for (int i = 0; i < channels; ++i) {
val[i] = weights[weightPtr];
++weightPtr;
}
stridePointsWt.values = val;
trtWeights.push_back(stridePointsWt);

nvinfer1::IConstantLayer* stridePoints = network->addConstant(nvinfer1::Dims{2, {shuffleDims.d[0], 1}}, stridePointsWt);
assert(stridePoints != nullptr);
std::string stridePointsLayerName = "stride_points_" + std::to_string(layerIdx);
stridePoints->setName(stridePointsLayerName.c_str());
nvinfer1::ITensor* stridePointsTensor = stridePoints->getOutput(0);
nvinfer1::IConstantLayer* stridePoints = network->addConstant(nvinfer1::Dims{2, {shuffleDims.d[0], 1}}, stridePointsWt);
assert(stridePoints != nullptr);
std::string stridePointsLayerName = "stride_points_" + std::to_string(layerIdx);
stridePoints->setName(stridePointsLayerName.c_str());
nvinfer1::ITensor* stridePointsTensor = stridePoints->getOutput(0);

nvinfer1::IElementWiseLayer* pred
= network->addElementWise(*output, *stridePointsTensor, nvinfer1::ElementWiseOperation::kPROD);
assert(pred != nullptr);
std::string predLayerName = "pred_" + std::to_string(layerIdx);
pred->setName(predLayerName.c_str());
output = pred->getOutput(0);
nvinfer1::IElementWiseLayer* pred = network->addElementWise(*output, *stridePointsTensor,
nvinfer1::ElementWiseOperation::kPROD);
assert(pred != nullptr);
std::string predLayerName = "pred_" + std::to_string(layerIdx);
pred->setName(predLayerName.c_str());
output = pred->getOutput(0);

return output;
return output;
}

@@ -8,17 +8,11 @@

#include <map>
#include <vector>
#include <cassert>

#include "NvInfer.h"

nvinfer1::ITensor* regLayer(
int layerIdx,
std::map<std::string, std::string>& block,
std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights,
int& weightPtr,
nvinfer1::ITensor* input,
nvinfer1::ITensor* regLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);

#endif
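The reg layer wires up the anchor-free decode of the YOLOv8 head: the transposed (points, 4) tensor is split into left-top and right-bottom distances, turned into box corners against the anchor points, and scaled by the per-point stride. A standalone sketch (not part of this diff; the numbers are made up) for a single anchor point:

#include <iostream>

int main()
{
    float anchorX = 0.5f, anchorY = 0.5f, stride = 8.0f;
    float lt[2] = {1.0f, 2.0f};             // predicted left/top distances
    float rb[2] = {3.0f, 1.0f};             // predicted right/bottom distances
    float x1 = (anchorX - lt[0]) * stride;  // kSUB, then kPROD with the stride
    float y1 = (anchorY - lt[1]) * stride;
    float x2 = (anchorX + rb[0]) * stride;  // kSUM, then kPROD with the stride
    float y2 = (anchorY + rb[1]) * stride;
    std::cout << x1 << " " << y1 << " " << x2 << " " << y2 << "\n";  // -4 -12 28 12
    return 0;
}
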
@@ -5,58 +5,55 @@

#include "reorg_layer.h"

nvinfer1::ITensor* reorgLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
#include <vector>
#include <cassert>

nvinfer1::ITensor*
reorgLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
nvinfer1::ITensor* output;

assert(block.at("type") == "reorg");
assert(block.at("type") == "reorg");

nvinfer1::Dims inputDims = input->getDimensions();
nvinfer1::Dims inputDims = input->getDimensions();

nvinfer1::ISliceLayer *slice1 = network->addSlice(
*input, nvinfer1::Dims{3, {0, 0, 0}}, nvinfer1::Dims{3, {inputDims.d[0], inputDims.d[1] / 2, inputDims.d[2] / 2}},
nvinfer1::Dims{3, {1, 2, 2}});
assert(slice1 != nullptr);
std::string slice1LayerName = "slice1_" + std::to_string(layerIdx);
slice1->setName(slice1LayerName.c_str());
nvinfer1::ISliceLayer *slice1 = network->addSlice(*input, nvinfer1::Dims{3, {0, 0, 0}},
nvinfer1::Dims{3, {inputDims.d[0], inputDims.d[1] / 2, inputDims.d[2] / 2}}, nvinfer1::Dims{3, {1, 2, 2}});
assert(slice1 != nullptr);
std::string slice1LayerName = "slice1_" + std::to_string(layerIdx);
slice1->setName(slice1LayerName.c_str());

nvinfer1::ISliceLayer *slice2 = network->addSlice(
*input, nvinfer1::Dims{3, {0, 1, 0}}, nvinfer1::Dims{3, {inputDims.d[0], inputDims.d[1] / 2, inputDims.d[2] / 2}},
nvinfer1::Dims{3, {1, 2, 2}});
assert(slice2 != nullptr);
std::string slice2LayerName = "slice2_" + std::to_string(layerIdx);
slice2->setName(slice2LayerName.c_str());
nvinfer1::ISliceLayer *slice2 = network->addSlice(*input, nvinfer1::Dims{3, {0, 1, 0}},
nvinfer1::Dims{3, {inputDims.d[0], inputDims.d[1] / 2, inputDims.d[2] / 2}}, nvinfer1::Dims{3, {1, 2, 2}});
assert(slice2 != nullptr);
std::string slice2LayerName = "slice2_" + std::to_string(layerIdx);
slice2->setName(slice2LayerName.c_str());

nvinfer1::ISliceLayer *slice3 = network->addSlice(
*input, nvinfer1::Dims{3, {0, 0, 1}}, nvinfer1::Dims{3, {inputDims.d[0], inputDims.d[1] / 2, inputDims.d[2] / 2}},
nvinfer1::Dims{3, {1, 2, 2}});
assert(slice3 != nullptr);
std::string slice3LayerName = "slice3_" + std::to_string(layerIdx);
slice3->setName(slice3LayerName.c_str());
nvinfer1::ISliceLayer *slice3 = network->addSlice(*input, nvinfer1::Dims{3, {0, 0, 1}},
nvinfer1::Dims{3, {inputDims.d[0], inputDims.d[1] / 2, inputDims.d[2] / 2}}, nvinfer1::Dims{3, {1, 2, 2}});
assert(slice3 != nullptr);
std::string slice3LayerName = "slice3_" + std::to_string(layerIdx);
slice3->setName(slice3LayerName.c_str());

nvinfer1::ISliceLayer *slice4 = network->addSlice(
*input, nvinfer1::Dims{3, {0, 1, 1}}, nvinfer1::Dims{3, {inputDims.d[0], inputDims.d[1] / 2, inputDims.d[2] / 2}},
nvinfer1::Dims{3, {1, 2, 2}});
assert(slice4 != nullptr);
std::string slice4LayerName = "slice4_" + std::to_string(layerIdx);
slice4->setName(slice4LayerName.c_str());
nvinfer1::ISliceLayer *slice4 = network->addSlice(*input, nvinfer1::Dims{3, {0, 1, 1}},
nvinfer1::Dims{3, {inputDims.d[0], inputDims.d[1] / 2, inputDims.d[2] / 2}}, nvinfer1::Dims{3, {1, 2, 2}});
assert(slice4 != nullptr);
std::string slice4LayerName = "slice4_" + std::to_string(layerIdx);
slice4->setName(slice4LayerName.c_str());

std::vector<nvinfer1::ITensor*> concatInputs;
concatInputs.push_back(slice1->getOutput(0));
concatInputs.push_back(slice2->getOutput(0));
concatInputs.push_back(slice3->getOutput(0));
concatInputs.push_back(slice4->getOutput(0));
std::vector<nvinfer1::ITensor*> concatInputs;
concatInputs.push_back(slice1->getOutput(0));
concatInputs.push_back(slice2->getOutput(0));
concatInputs.push_back(slice3->getOutput(0));
concatInputs.push_back(slice4->getOutput(0));

nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
assert(concat != nullptr);
std::string concatLayerName = "concat_" + std::to_string(layerIdx);
concat->setName(concatLayerName.c_str());
concat->setAxis(0);
output = concat->getOutput(0);
nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
assert(concat != nullptr);
std::string concatLayerName = "concat_" + std::to_string(layerIdx);
concat->setName(concatLayerName.c_str());
concat->setAxis(0);
output = concat->getOutput(0);

return output;
return output;
}

@@ -3,19 +3,14 @@
* https://www.github.com/marcoslucianops
*/

#ifndef __REORGV5_LAYER_H__
#define __REORGV5_LAYER_H__
#ifndef __REORG_LAYER_H__
#define __REORG_LAYER_H__

#include <map>
#include <vector>
#include <cassert>

#include "NvInfer.h"

nvinfer1::ITensor* reorgLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::ITensor* reorgLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);

#endif
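The four strided slices implement space-to-depth: each 2x2 spatial neighborhood is spread across four channel groups, so (c, h, w) becomes (4c, h/2, w/2). A standalone sketch (not part of this diff) of the same mapping on a flat buffer; the (row, col) offsets match slice1 through slice4:

#include <vector>

int main()
{
    const int h = 4, w = 4;                  // single-channel example
    std::vector<float> in(h * w);
    for (int i = 0; i < h * w; ++i) in[i] = i;
    std::vector<float> out;
    const int offsets[4][2] = {{0, 0}, {1, 0}, {0, 1}, {1, 1}};
    for (const auto& o : offsets)            // one output channel group each
        for (int y = o[0]; y < h; y += 2)
            for (int x = o[1]; x < w; x += 2)
                out.push_back(in[y * w + x]);
    return 0;                                // out is (4, 2, 2) flattened
}
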
@@ -5,78 +5,70 @@

#include "route_layer.h"

nvinfer1::ITensor* routeLayer(
int layerIdx,
std::string& layers,
std::map<std::string, std::string>& block,
std::vector<nvinfer1::ITensor*> tensorOutputs,
nvinfer1::INetworkDefinition* network)
nvinfer1::ITensor*
routeLayer(int layerIdx, std::string& layers, std::map<std::string, std::string>& block,
std::vector<nvinfer1::ITensor*> tensorOutputs, nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
nvinfer1::ITensor* output;

assert(block.at("type") == "route");
assert(block.find("layers") != block.end());
assert(block.at("type") == "route");
assert(block.find("layers") != block.end());

std::string strLayers = block.at("layers");
std::vector<int> idxLayers;
size_t lastPos = 0, pos = 0;
while ((pos = strLayers.find(',', lastPos)) != std::string::npos)
{
int vL = std::stoi(trim(strLayers.substr(lastPos, pos - lastPos)));
idxLayers.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strLayers.length())
{
std::string lastV = trim(strLayers.substr(lastPos));
if (!lastV.empty())
idxLayers.push_back(std::stoi(lastV));
}
assert (!idxLayers.empty());
std::vector<nvinfer1::ITensor*> concatInputs;
for (uint i = 0; i < idxLayers.size(); ++i)
{
if (idxLayers[i] < 0)
idxLayers[i] = tensorOutputs.size() + idxLayers[i];
assert (idxLayers[i] >= 0 && idxLayers[i] < (int)tensorOutputs.size());
concatInputs.push_back(tensorOutputs[idxLayers[i]]);
if (i < idxLayers.size() - 1)
layers += std::to_string(idxLayers[i]) + ", ";
}
layers += std::to_string(idxLayers[idxLayers.size() - 1]);
std::string strLayers = block.at("layers");
std::vector<int> idxLayers;
size_t lastPos = 0, pos = 0;
while ((pos = strLayers.find(',', lastPos)) != std::string::npos) {
int vL = std::stoi(trim(strLayers.substr(lastPos, pos - lastPos)));
idxLayers.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strLayers.length()) {
std::string lastV = trim(strLayers.substr(lastPos));
if (!lastV.empty())
idxLayers.push_back(std::stoi(lastV));
}
assert (!idxLayers.empty());
std::vector<nvinfer1::ITensor*> concatInputs;
for (uint i = 0; i < idxLayers.size(); ++i) {
if (idxLayers[i] < 0)
idxLayers[i] = tensorOutputs.size() + idxLayers[i];
assert (idxLayers[i] >= 0 && idxLayers[i] < (int)tensorOutputs.size());
concatInputs.push_back(tensorOutputs[idxLayers[i]]);
if (i < idxLayers.size() - 1)
layers += std::to_string(idxLayers[i]) + ", ";
}
layers += std::to_string(idxLayers[idxLayers.size() - 1]);

if (concatInputs.size() == 1)
output = concatInputs[0];
else {
int axis = 0;
if (block.find("axis") != block.end())
axis = std::stoi(block.at("axis"));
if (axis < 0)
axis = concatInputs[0]->getDimensions().nbDims + axis;
if (concatInputs.size() == 1)
output = concatInputs[0];
else {
int axis = 0;
if (block.find("axis") != block.end())
axis = std::stoi(block.at("axis"));
if (axis < 0)
axis = concatInputs[0]->getDimensions().nbDims + axis;

nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
assert(concat != nullptr);
std::string concatLayerName = "route_" + std::to_string(layerIdx);
concat->setName(concatLayerName.c_str());
concat->setAxis(axis);
output = concat->getOutput(0);
}
nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
assert(concat != nullptr);
std::string concatLayerName = "route_" + std::to_string(layerIdx);
concat->setName(concatLayerName.c_str());
concat->setAxis(axis);
output = concat->getOutput(0);
}

if (block.find("groups") != block.end())
{
nvinfer1::Dims prevTensorDims = output->getDimensions();
int groups = stoi(block.at("groups"));
int group_id = stoi(block.at("group_id"));
int startSlice = (prevTensorDims.d[0] / groups) * group_id;
int channelSlice = (prevTensorDims.d[0] / groups);
nvinfer1::ISliceLayer* slice = network->addSlice(
*output, nvinfer1::Dims{3, {startSlice, 0, 0}},
nvinfer1::Dims{3, {channelSlice, prevTensorDims.d[1], prevTensorDims.d[2]}}, nvinfer1::Dims{3, {1, 1, 1}});
assert(slice != nullptr);
std::string sliceLayerName = "slice_" + std::to_string(layerIdx);
slice->setName(sliceLayerName.c_str());
output = slice->getOutput(0);
}
if (block.find("groups") != block.end()) {
nvinfer1::Dims prevTensorDims = output->getDimensions();
int groups = stoi(block.at("groups"));
int group_id = stoi(block.at("group_id"));
int startSlice = (prevTensorDims.d[0] / groups) * group_id;
int channelSlice = (prevTensorDims.d[0] / groups);
nvinfer1::ISliceLayer* slice = network->addSlice(*output, nvinfer1::Dims{3, {startSlice, 0, 0}},
nvinfer1::Dims{3, {channelSlice, prevTensorDims.d[1], prevTensorDims.d[2]}}, nvinfer1::Dims{3, {1, 1, 1}});
assert(slice != nullptr);
std::string sliceLayerName = "slice_" + std::to_string(layerIdx);
slice->setName(sliceLayerName.c_str());
output = slice->getOutput(0);
}

return output;
return output;
}

@@ -6,14 +6,9 @@
#ifndef __ROUTE_LAYER_H__
#define __ROUTE_LAYER_H__

#include "NvInfer.h"
#include "../utils.h"

nvinfer1::ITensor* routeLayer(
int layerIdx,
std::string& layers,
std::map<std::string, std::string>& block,
std::vector<nvinfer1::ITensor*> tensorOutputs,
nvinfer1::INetworkDefinition* network);
nvinfer1::ITensor* routeLayer(int layerIdx, std::string& layers, std::map<std::string, std::string>& block,
std::vector<nvinfer1::ITensor*> tensorOutputs, nvinfer1::INetworkDefinition* network);

#endif
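Negative entries in "layers" are resolved relative to the number of outputs emitted so far, as in Darknet. A standalone sketch (not part of this diff): with 10 outputs recorded, layers=-1, -4 selects outputs 9 and 6:

#include <cassert>
#include <iostream>
#include <vector>

int main()
{
    std::vector<int> idxLayers = {-1, -4};  // parsed from layers=-1, -4
    int emitted = 10;                       // tensorOutputs.size()
    for (int& idx : idxLayers) {
        if (idx < 0)
            idx = emitted + idx;            // resolve relative to the end
        assert(idx >= 0 && idx < emitted);
        std::cout << idx << "\n";           // 9, then 6
    }
    return 0;
}
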
@@ -5,48 +5,41 @@

#include "shortcut_layer.h"

nvinfer1::ITensor* shortcutLayer(
int layerIdx,
std::string mode,
std::string activation,
std::string inputVol,
std::string shortcutVol,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::ITensor* shortcutInput,
#include <cassert>

nvinfer1::ITensor*
shortcutLayer(int layerIdx, std::string mode, std::string activation, std::string inputVol, std::string shortcutVol,
std::map<std::string, std::string>& block, nvinfer1::ITensor* input, nvinfer1::ITensor* shortcutInput,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
nvinfer1::ITensor* output;

assert(block.at("type") == "shortcut");
assert(block.at("type") == "shortcut");

nvinfer1::ElementWiseOperation operation = nvinfer1::ElementWiseOperation::kSUM;
nvinfer1::ElementWiseOperation operation = nvinfer1::ElementWiseOperation::kSUM;

if (mode == "mul")
operation = nvinfer1::ElementWiseOperation::kPROD;
if (mode == "mul")
operation = nvinfer1::ElementWiseOperation::kPROD;

if (mode == "add" && inputVol != shortcutVol)
{
nvinfer1::ISliceLayer* slice = network->addSlice(
*shortcutInput, nvinfer1::Dims{3, {0, 0, 0}}, input->getDimensions(), nvinfer1::Dims{3, {1, 1, 1}});
assert(slice != nullptr);
std::string sliceLayerName = "slice_" + std::to_string(layerIdx);
slice->setName(sliceLayerName.c_str());
output = slice->getOutput(0);
}
else
{
output = shortcutInput;
}
if (mode == "add" && inputVol != shortcutVol) {
nvinfer1::ISliceLayer* slice = network->addSlice(*shortcutInput, nvinfer1::Dims{3, {0, 0, 0}}, input->getDimensions(),
nvinfer1::Dims{3, {1, 1, 1}});
assert(slice != nullptr);
std::string sliceLayerName = "slice_" + std::to_string(layerIdx);
slice->setName(sliceLayerName.c_str());
output = slice->getOutput(0);
}
else
output = shortcutInput;

nvinfer1::IElementWiseLayer* shortcut = network->addElementWise(*input, *output, operation);
assert(shortcut != nullptr);
std::string shortcutLayerName = "shortcut_" + std::to_string(layerIdx);
shortcut->setName(shortcutLayerName.c_str());
output = shortcut->getOutput(0);
nvinfer1::IElementWiseLayer* shortcut = network->addElementWise(*input, *output, operation);
assert(shortcut != nullptr);
std::string shortcutLayerName = "shortcut_" + std::to_string(layerIdx);
shortcut->setName(shortcutLayerName.c_str());
output = shortcut->getOutput(0);

output = activationLayer(layerIdx, activation, output, network);
assert(output != nullptr);
output = activationLayer(layerIdx, activation, output, network);
assert(output != nullptr);

return output;
return output;
}

@@ -12,15 +12,8 @@

#include "activation_layer.h"

nvinfer1::ITensor* shortcutLayer(
int layerIdx,
std::string mode,
std::string activation,
std::string inputVol,
std::string shortcutVol,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::ITensor* shortcut,
nvinfer1::INetworkDefinition* network);
nvinfer1::ITensor* shortcutLayer(int layerIdx, std::string mode, std::string activation, std::string inputVol,
std::string shortcutVol, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
nvinfer1::ITensor* shortcut, nvinfer1::INetworkDefinition* network);

#endif
@@ -5,119 +5,133 @@

#include "shuffle_layer.h"

nvinfer1::ITensor* shuffleLayer(
int layerIdx,
std::string& layer,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
std::vector<nvinfer1::ITensor*> tensorOutputs,
nvinfer1::INetworkDefinition* network)
nvinfer1::ITensor*
shuffleLayer(int layerIdx, std::string& layer, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
std::vector<nvinfer1::ITensor*> tensorOutputs, nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
nvinfer1::ITensor* output;

assert(block.at("type") == "shuffle");
assert(block.at("type") == "shuffle");

nvinfer1::IShuffleLayer* shuffle = network->addShuffle(*input);
assert(shuffle != nullptr);
std::string shuffleLayerName = "shuffle_" + std::to_string(layerIdx);
shuffle->setName(shuffleLayerName.c_str());
nvinfer1::IShuffleLayer* shuffle = network->addShuffle(*input);
assert(shuffle != nullptr);
std::string shuffleLayerName = "shuffle_" + std::to_string(layerIdx);
shuffle->setName(shuffleLayerName.c_str());

if (block.find("reshape") != block.end())
{
std::string strReshape = block.at("reshape");
std::vector<int32_t> reshape;
size_t lastPos = 0, pos = 0;
while ((pos = strReshape.find(',', lastPos)) != std::string::npos)
{
int vL = std::stoi(trim(strReshape.substr(lastPos, pos - lastPos)));
reshape.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strReshape.length())
{
std::string lastV = trim(strReshape.substr(lastPos));
if (!lastV.empty())
reshape.push_back(std::stoi(lastV));
}
assert(!reshape.empty());
if (block.find("reshape") != block.end()) {
int from = -1;
if (block.find("from") != block.end())
from = std::stoi(block.at("from"));

int from = -1;
if (block.find("from") != block.end())
from = std::stoi(block.at("from"));
if (from < 0)
from = tensorOutputs.size() + from;

if (from < 0)
from = tensorOutputs.size() + from;
layer = std::to_string(from);

layer = std::to_string(from);
nvinfer1::Dims inputTensorDims = tensorOutputs[from]->getDimensions();

nvinfer1::Dims inputTensorDims = tensorOutputs[from]->getDimensions();
int32_t l = inputTensorDims.d[1] * inputTensorDims.d[2];

nvinfer1::Dims reshapeDims;
reshapeDims.nbDims = reshape.size();

for (uint i = 0; i < reshape.size(); ++i)
if (reshape[i] == 0)
reshapeDims.d[i] = l;
else
reshapeDims.d[i] = reshape[i];

shuffle->setReshapeDimensions(reshapeDims);
std::string strReshape = block.at("reshape");
std::vector<int32_t> reshape;
size_t lastPos = 0, pos = 0;
while ((pos = strReshape.find(',', lastPos)) != std::string::npos) {
std::string V = trim(strReshape.substr(lastPos, pos - lastPos));
if (V == "c")
reshape.push_back(inputTensorDims.d[0]);
else if (V == "ch")
reshape.push_back(inputTensorDims.d[0] * inputTensorDims.d[1]);
else if (V == "cw")
reshape.push_back(inputTensorDims.d[0] * inputTensorDims.d[2]);
else if (V == "h")
reshape.push_back(inputTensorDims.d[1]);
else if (V == "hw")
reshape.push_back(inputTensorDims.d[1] * inputTensorDims.d[2]);
else if (V == "w")
reshape.push_back(inputTensorDims.d[2]);
else if (V == "chw")
reshape.push_back(inputTensorDims.d[0] * inputTensorDims.d[1] * inputTensorDims.d[2]);
else
reshape.push_back(std::stoi(V));
lastPos = pos + 1;
}

if (block.find("transpose1") != block.end())
{
std::string strTranspose1 = block.at("transpose1");
std::vector<int32_t> transpose1;
size_t lastPos = 0, pos = 0;
while ((pos = strTranspose1.find(',', lastPos)) != std::string::npos)
{
int vL = std::stoi(trim(strTranspose1.substr(lastPos, pos - lastPos)));
transpose1.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strTranspose1.length())
{
std::string lastV = trim(strTranspose1.substr(lastPos));
if (!lastV.empty())
transpose1.push_back(std::stoi(lastV));
}
assert(!transpose1.empty());

nvinfer1::Permutation permutation1;
for (uint i = 0; i < transpose1.size(); ++i)
permutation1.order[i] = transpose1[i];

shuffle->setFirstTranspose(permutation1);
if (lastPos < strReshape.length()) {
std::string lastV = trim(strReshape.substr(lastPos));
if (!lastV.empty()) {
if (lastV == "c")
reshape.push_back(inputTensorDims.d[0]);
else if (lastV == "ch")
reshape.push_back(inputTensorDims.d[0] * inputTensorDims.d[1]);
else if (lastV == "cw")
reshape.push_back(inputTensorDims.d[0] * inputTensorDims.d[2]);
else if (lastV == "h")
reshape.push_back(inputTensorDims.d[1]);
else if (lastV == "hw")
reshape.push_back(inputTensorDims.d[1] * inputTensorDims.d[2]);
else if (lastV == "w")
reshape.push_back(inputTensorDims.d[2]);
else if (lastV == "chw")
reshape.push_back(inputTensorDims.d[0] * inputTensorDims.d[1] * inputTensorDims.d[2]);
else
reshape.push_back(std::stoi(lastV));
}
}
assert(!reshape.empty());

if (block.find("transpose2") != block.end())
{
std::string strTranspose2 = block.at("transpose2");
std::vector<int32_t> transpose2;
size_t lastPos = 0, pos = 0;
while ((pos = strTranspose2.find(',', lastPos)) != std::string::npos)
{
int vL = std::stoi(trim(strTranspose2.substr(lastPos, pos - lastPos)));
transpose2.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strTranspose2.length())
{
std::string lastV = trim(strTranspose2.substr(lastPos));
if (!lastV.empty())
transpose2.push_back(std::stoi(lastV));
}
assert(!transpose2.empty());
nvinfer1::Dims reshapeDims;
reshapeDims.nbDims = reshape.size();

nvinfer1::Permutation permutation2;
for (uint i = 0; i < transpose2.size(); ++i)
permutation2.order[i] = transpose2[i];
for (uint i = 0; i < reshape.size(); ++i)
reshapeDims.d[i] = reshape[i];

shuffle->setSecondTranspose(permutation2);
shuffle->setReshapeDimensions(reshapeDims);
}

if (block.find("transpose1") != block.end()) {
std::string strTranspose1 = block.at("transpose1");
std::vector<int32_t> transpose1;
size_t lastPos = 0, pos = 0;
while ((pos = strTranspose1.find(',', lastPos)) != std::string::npos) {
int vL = std::stoi(trim(strTranspose1.substr(lastPos, pos - lastPos)));
transpose1.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strTranspose1.length()) {
std::string lastV = trim(strTranspose1.substr(lastPos));
if (!lastV.empty())
transpose1.push_back(std::stoi(lastV));
}
assert(!transpose1.empty());

output = shuffle->getOutput(0);
nvinfer1::Permutation permutation1;
for (uint i = 0; i < transpose1.size(); ++i)
permutation1.order[i] = transpose1[i];

return output;
shuffle->setFirstTranspose(permutation1);
}

if (block.find("transpose2") != block.end()) {
std::string strTranspose2 = block.at("transpose2");
std::vector<int32_t> transpose2;
size_t lastPos = 0, pos = 0;
while ((pos = strTranspose2.find(',', lastPos)) != std::string::npos) {
int vL = std::stoi(trim(strTranspose2.substr(lastPos, pos - lastPos)));
transpose2.push_back(vL);
lastPos = pos + 1;
}
if (lastPos < strTranspose2.length()) {
std::string lastV = trim(strTranspose2.substr(lastPos));
if (!lastV.empty())
transpose2.push_back(std::stoi(lastV));
}
assert(!transpose2.empty());

nvinfer1::Permutation permutation2;
for (uint i = 0; i < transpose2.size(); ++i)
permutation2.order[i] = transpose2[i];

shuffle->setSecondTranspose(permutation2);
}

output = shuffle->getOutput(0);

return output;
}

@@ -6,15 +6,9 @@
#ifndef __SHUFFLE_LAYER_H__
#define __SHUFFLE_LAYER_H__

#include "NvInfer.h"
#include "../utils.h"

nvinfer1::ITensor* shuffleLayer(
int layerIdx,
std::string& layer,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
std::vector<nvinfer1::ITensor*> tensorOutputs,
nvinfer1::INetworkDefinition* network);
nvinfer1::ITensor* shuffleLayer(int layerIdx, std::string& layer, std::map<std::string, std::string>& block,
nvinfer1::ITensor* input, std::vector<nvinfer1::ITensor*> tensorOutputs, nvinfer1::INetworkDefinition* network);

#endif
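The new reshape parser accepts symbolic tokens (c, h, w, ch, cw, hw, chw) expanded from the dimensions of the "from" tensor, replacing the old convention where 0 stood for h * w. A standalone sketch (not part of this diff) of the token expansion for a hypothetical (255, 20, 20) tensor, where reshape=c, hw yields (255, 400):

#include <cstdint>
#include <string>
#include <vector>

int32_t expandToken(const std::string& v, const int32_t d[3])
{
    if (v == "c") return d[0];
    if (v == "h") return d[1];
    if (v == "w") return d[2];
    if (v == "ch") return d[0] * d[1];
    if (v == "cw") return d[0] * d[2];
    if (v == "hw") return d[1] * d[2];
    if (v == "chw") return d[0] * d[1] * d[2];
    return std::stoi(v);  // plain integer token
}

int main()
{
    const int32_t dims[3] = {255, 20, 20};
    std::vector<int32_t> reshape = {expandToken("c", dims), expandToken("hw", dims)};
    return reshape[0] == 255 && reshape[1] == 400 ? 0 : 1;
}
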
@@ -5,25 +5,25 @@

#include "softmax_layer.h"

nvinfer1::ITensor* softmaxLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
#include <cassert>

nvinfer1::ITensor*
softmaxLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
nvinfer1::ITensor* output;

assert(block.at("type") == "softmax");
assert(block.find("axes") != block.end());
assert(block.at("type") == "softmax");
assert(block.find("axes") != block.end());

int axes = std::stoi(block.at("axes"));
int axes = std::stoi(block.at("axes"));

nvinfer1::ISoftMaxLayer* softmax = network->addSoftMax(*input);
assert(softmax != nullptr);
std::string softmaxLayerName = "softmax_" + std::to_string(layerIdx);
softmax->setName(softmaxLayerName.c_str());
softmax->setAxes(1 << axes);
output = softmax->getOutput(0);
nvinfer1::ISoftMaxLayer* softmax = network->addSoftMax(*input);
assert(softmax != nullptr);
std::string softmaxLayerName = "softmax_" + std::to_string(layerIdx);
softmax->setName(softmaxLayerName.c_str());
softmax->setAxes(1 << axes);
output = softmax->getOutput(0);

return output;
return output;
}

@@ -7,14 +7,10 @@
#define __SOFTMAX_LAYER_H__

#include <map>
#include <cassert>

#include "NvInfer.h"

nvinfer1::ITensor* softmaxLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::ITensor* softmaxLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);

#endif
@@ -5,28 +5,28 @@

#include "upsample_layer.h"

nvinfer1::ITensor* upsampleLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
#include <cassert>

nvinfer1::ITensor*
upsampleLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network)
{
nvinfer1::ITensor* output;
nvinfer1::ITensor* output;

assert(block.at("type") == "upsample");
assert(block.find("stride") != block.end());
assert(block.at("type") == "upsample");
assert(block.find("stride") != block.end());

int stride = std::stoi(block.at("stride"));
int stride = std::stoi(block.at("stride"));

float scale[3] = {1, static_cast<float>(stride), static_cast<float>(stride)};
float scale[3] = {1, static_cast<float>(stride), static_cast<float>(stride)};

nvinfer1::IResizeLayer* resize = network->addResize(*input);
assert(resize != nullptr);
std::string resizeLayerName = "upsample_" + std::to_string(layerIdx);
resize->setName(resizeLayerName.c_str());
resize->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
resize->setScales(scale, 3);
output = resize->getOutput(0);
nvinfer1::IResizeLayer* resize = network->addResize(*input);
assert(resize != nullptr);
std::string resizeLayerName = "upsample_" + std::to_string(layerIdx);
resize->setName(resizeLayerName.c_str());
resize->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
resize->setScales(scale, 3);
output = resize->getOutput(0);

return output;
return output;
}

@@ -7,14 +7,10 @@
#define __UPSAMPLE_LAYER_H__

#include <map>
#include <cassert>

#include "NvInfer.h"

nvinfer1::ITensor* upsampleLayer(
int layerIdx,
std::map<std::string, std::string>& block,
nvinfer1::ITensor* input,
nvinfer1::ITensor* upsampleLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);

#endif
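The {1, static_cast<float>(stride), static_cast<float>(stride)} scales leave the channel dimension alone and multiply H and W, so stride=2 turns a (256, 20, 20) tensor into (256, 40, 40) with nearest-neighbor resampling. A standalone sketch (not part of this diff) of that shape arithmetic:

#include <iostream>

int main()
{
    int dims[3] = {256, 20, 20};
    float scale[3] = {1.0f, 2.0f, 2.0f};  // {1, stride, stride} with stride = 2
    for (int i = 0; i < 3; ++i)
        dims[i] = static_cast<int>(dims[i] * scale[i]);
    std::cout << dims[0] << "x" << dims[1] << "x" << dims[2] << "\n";  // 256x40x40
    return 0;
}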