Big update

Marcos Luciano
2023-05-19 03:05:43 -03:00
parent 68f762d5bd
commit 07feae9509
86 changed files with 1523 additions and 5223 deletions

File: batchnorm_layer.cpp

@@ -10,7 +10,7 @@
 nvinfer1::ITensor*
 batchnormLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
-    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, float eps, nvinfer1::ITensor* input,
+    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, nvinfer1::ITensor* input,
     nvinfer1::INetworkDefinition* network)
 {
     nvinfer1::ITensor* output;
@@ -26,41 +26,21 @@ batchnormLayer(int layerIdx, std::map<std::string, std::string>& block, std::vec
     std::vector<float> bnRunningMean;
     std::vector<float> bnRunningVar;
-    if (weightsType == "weights") {
-        for (int i = 0; i < filters; ++i) {
-            bnBiases.push_back(weights[weightPtr]);
-            ++weightPtr;
-        }
-        for (int i = 0; i < filters; ++i) {
-            bnWeights.push_back(weights[weightPtr]);
-            ++weightPtr;
-        }
-        for (int i = 0; i < filters; ++i) {
-            bnRunningMean.push_back(weights[weightPtr]);
-            ++weightPtr;
-        }
-        for (int i = 0; i < filters; ++i) {
-            bnRunningVar.push_back(sqrt(weights[weightPtr] + 1.0e-5));
-            ++weightPtr;
-        }
-    }
-    else {
-        for (int i = 0; i < filters; ++i) {
-            bnWeights.push_back(weights[weightPtr]);
-            ++weightPtr;
-        }
-        for (int i = 0; i < filters; ++i) {
-            bnBiases.push_back(weights[weightPtr]);
-            ++weightPtr;
-        }
-        for (int i = 0; i < filters; ++i) {
-            bnRunningMean.push_back(weights[weightPtr]);
-            ++weightPtr;
-        }
-        for (int i = 0; i < filters; ++i) {
-            bnRunningVar.push_back(sqrt(weights[weightPtr] + eps));
-            ++weightPtr;
-        }
-    }
+    for (int i = 0; i < filters; ++i) {
+        bnBiases.push_back(weights[weightPtr]);
+        ++weightPtr;
+    }
+    for (int i = 0; i < filters; ++i) {
+        bnWeights.push_back(weights[weightPtr]);
+        ++weightPtr;
+    }
+    for (int i = 0; i < filters; ++i) {
+        bnRunningMean.push_back(weights[weightPtr]);
+        ++weightPtr;
+    }
+    for (int i = 0; i < filters; ++i) {
+        bnRunningVar.push_back(sqrt(weights[weightPtr] + 1.0e-5));
+        ++weightPtr;
+    }
     int size = filters;
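
Note: the four per-channel vectors loaded here (bnBiases, bnWeights, bnRunningMean, bnRunningVar, with bnRunningVar already holding sqrt(var + 1.0e-5)) are what gets folded into a per-channel scale and shift. A minimal standalone C++ sketch of that folding math, with made-up values (not code from this commit):

#include <cmath>
#include <cstdio>
#include <vector>

// Toy illustration of folding batch norm into a scale and shift per channel.
// Matches the loading code above: the loader stores sqrt(var + 1.0e-5).
int main()
{
    std::vector<float> gamma = {1.0f, 0.5f};  // bnWeights
    std::vector<float> beta = {0.1f, -0.2f};  // bnBiases
    std::vector<float> mean = {0.0f, 2.0f};   // bnRunningMean
    std::vector<float> varRaw = {1.0f, 4.0f}; // variance as read from the file
    for (size_t i = 0; i < gamma.size(); ++i) {
        float runningVarSqrt = sqrtf(varRaw[i] + 1.0e-5f); // what bnRunningVar holds
        float scale = gamma[i] / runningVarSqrt;           // y = scale * x + shift
        float shift = beta[i] - gamma[i] * mean[i] / runningVarSqrt;
        printf("channel %zu: scale=%f shift=%f\n", i, scale, shift);
    }
    return 0;
}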

File: batchnorm_layer.h

@@ -14,7 +14,7 @@
#include "activation_layer.h"
nvinfer1::ITensor* batchnormLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, float eps, nvinfer1::ITensor* input,
std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network);
#endif

File: c2f_layer.cpp (deleted)

@@ -1,82 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "c2f_layer.h"

#include <cassert>

#include "convolutional_layer.h"

nvinfer1::ITensor*
c2fLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, float eps, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;
    assert(block.at("type") == "c2f");
    assert(block.find("n") != block.end());
    assert(block.find("shortcut") != block.end());
    assert(block.find("filters") != block.end());

    int n = std::stoi(block.at("n"));
    bool shortcut = (block.at("shortcut") == "1");
    int filters = std::stoi(block.at("filters"));

    nvinfer1::Dims inputDims = input->getDimensions();

    nvinfer1::ISliceLayer* sliceLt = network->addSlice(*input, nvinfer1::Dims{3, {0, 0, 0}},
        nvinfer1::Dims{3, {inputDims.d[0] / 2, inputDims.d[1], inputDims.d[2]}}, nvinfer1::Dims{3, {1, 1, 1}});
    assert(sliceLt != nullptr);
    std::string sliceLtLayerName = "slice_lt_" + std::to_string(layerIdx);
    sliceLt->setName(sliceLtLayerName.c_str());
    nvinfer1::ITensor* lt = sliceLt->getOutput(0);

    nvinfer1::ISliceLayer* sliceRb = network->addSlice(*input, nvinfer1::Dims{3, {inputDims.d[0] / 2, 0, 0}},
        nvinfer1::Dims{3, {inputDims.d[0] / 2, inputDims.d[1], inputDims.d[2]}}, nvinfer1::Dims{3, {1, 1, 1}});
    assert(sliceRb != nullptr);
    std::string sliceRbLayerName = "slice_rb_" + std::to_string(layerIdx);
    sliceRb->setName(sliceRbLayerName.c_str());
    nvinfer1::ITensor* rb = sliceRb->getOutput(0);

    std::vector<nvinfer1::ITensor*> concatInputs;
    concatInputs.push_back(lt);
    concatInputs.push_back(rb);

    output = rb;

    for (int i = 0; i < n; ++i) {
        std::string cv1MlayerName = "c2f_1_" + std::to_string(i + 1) + "_";
        nvinfer1::ITensor* cv1M = convolutionalLayer(layerIdx, block, weights, trtWeights, weightPtr, weightsType, filters,
            eps, output, network, cv1MlayerName);
        assert(cv1M != nullptr);
        std::string cv2MlayerName = "c2f_2_" + std::to_string(i + 1) + "_";
        nvinfer1::ITensor* cv2M = convolutionalLayer(layerIdx, block, weights, trtWeights, weightPtr, weightsType, filters,
            eps, cv1M, network, cv2MlayerName);
        assert(cv2M != nullptr);
        if (shortcut) {
            nvinfer1::IElementWiseLayer* ew = network->addElementWise(*output, *cv2M, nvinfer1::ElementWiseOperation::kSUM);
            assert(ew != nullptr);
            std::string ewLayerName = "shortcut_c2f_" + std::to_string(i + 1) + "_" + std::to_string(layerIdx);
            ew->setName(ewLayerName.c_str());
            output = ew->getOutput(0);
            concatInputs.push_back(output);
        }
        else {
            output = cv2M;
            concatInputs.push_back(output);
        }
    }

    nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
    assert(concat != nullptr);
    std::string concatLayerName = "route_" + std::to_string(layerIdx);
    concat->setName(concatLayerName.c_str());
    concat->setAxis(0);
    output = concat->getOutput(0);

    return output;
}
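
Note: the route concat at the end of the removed c2fLayer gathers the two initial channel halves plus one tensor of `filters` channels per bottleneck iteration, so its width grows with n. A quick standalone sketch of the resulting channel count, assuming (as an illustration, not taken from any cfg in this commit) that filters equals half the input channels:

#include <cstdio>

// Back-of-the-envelope check of the c2f route output width implied by the
// concat above. All values are illustrative.
int main()
{
    int inputChannels = 64;
    int n = 2;                                // block.at("n")
    int filters = inputChannels / 2;          // assumed: filters = C/2
    int outputChannels = 2 * (inputChannels / 2) + n * filters;
    printf("c2f route output channels: %d\n", outputChannels); // 128 here
    return 0;
}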

File: c2f_layer.h (deleted)

@@ -1,18 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __C2F_LAYER_H__
#define __C2F_LAYER_H__

#include <map>
#include <vector>

#include "NvInfer.h"

nvinfer1::ITensor* c2fLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, float eps, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif

File: cls_layer.cpp (deleted)

@@ -1,29 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "cls_layer.h"

#include <cassert>

nvinfer1::ITensor*
clsLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;
    assert(block.at("type") == "cls");

    nvinfer1::IShuffleLayer* shuffle = network->addShuffle(*input);
    assert(shuffle != nullptr);
    std::string shuffleLayerName = "shuffle_" + std::to_string(layerIdx);
    shuffle->setName(shuffleLayerName.c_str());
    nvinfer1::Permutation permutation;
    permutation.order[0] = 1;
    permutation.order[1] = 0;
    shuffle->setFirstTranspose(permutation);
    output = shuffle->getOutput(0);

    return output;
}

File: cls_layer.h (deleted)

@@ -1,16 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __CLS_LAYER_H__
#define __CLS_LAYER_H__

#include <map>

#include "NvInfer.h"

nvinfer1::ITensor* clsLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif

File: convolutional_layer.cpp

@@ -10,8 +10,8 @@
 nvinfer1::ITensor*
 convolutionalLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
-    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, int& inputChannels, float eps,
-    nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network, std::string layerName)
+    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, int& inputChannels, nvinfer1::ITensor* input,
+    nvinfer1::INetworkDefinition* network, std::string layerName)
 {
     nvinfer1::ITensor* output;
@@ -58,117 +58,60 @@ convolutionalLayer(int layerIdx, std::map<std::string, std::string>& block, std:
     nvinfer1::Weights convWt {nvinfer1::DataType::kFLOAT, nullptr, size};
     nvinfer1::Weights convBias {nvinfer1::DataType::kFLOAT, nullptr, bias};
-    if (weightsType == "weights") {
-        if (batchNormalize == false) {
-            float* val;
-            if (bias != 0) {
-                val = new float[filters];
-                for (int i = 0; i < filters; ++i) {
-                    val[i] = weights[weightPtr];
-                    ++weightPtr;
-                }
-                convBias.values = val;
-                trtWeights.push_back(convBias);
-            }
-            val = new float[size];
-            for (int i = 0; i < size; ++i) {
-                val[i] = weights[weightPtr];
-                ++weightPtr;
-            }
-            convWt.values = val;
-            trtWeights.push_back(convWt);
-        }
-        else {
-            for (int i = 0; i < filters; ++i) {
-                bnBiases.push_back(weights[weightPtr]);
-                ++weightPtr;
-            }
-            for (int i = 0; i < filters; ++i) {
-                bnWeights.push_back(weights[weightPtr]);
-                ++weightPtr;
-            }
-            for (int i = 0; i < filters; ++i) {
-                bnRunningMean.push_back(weights[weightPtr]);
-                ++weightPtr;
-            }
-            for (int i = 0; i < filters; ++i) {
-                bnRunningVar.push_back(sqrt(weights[weightPtr] + 1.0e-5));
-                ++weightPtr;
-            }
-            float* val;
-            if (bias != 0) {
-                val = new float[filters];
-                for (int i = 0; i < filters; ++i) {
-                    val[i] = weights[weightPtr];
-                    ++weightPtr;
-                }
-                convBias.values = val;
-            }
-            val = new float[size];
-            for (int i = 0; i < size; ++i) {
-                val[i] = weights[weightPtr];
-                ++weightPtr;
-            }
-            convWt.values = val;
-            trtWeights.push_back(convWt);
-            if (bias != 0)
-                trtWeights.push_back(convBias);
-        }
-    }
-    else {
-        if (batchNormalize == false) {
-            float* val = new float[size];
-            for (int i = 0; i < size; ++i) {
-                val[i] = weights[weightPtr];
-                ++weightPtr;
-            }
-            convWt.values = val;
-            trtWeights.push_back(convWt);
-            if (bias != 0) {
-                val = new float[filters];
-                for (int i = 0; i < filters; ++i) {
-                    val[i] = weights[weightPtr];
-                    ++weightPtr;
-                }
-                convBias.values = val;
-                trtWeights.push_back(convBias);
-            }
-        }
-        else {
-            float* val = new float[size];
-            for (int i = 0; i < size; ++i) {
-                val[i] = weights[weightPtr];
-                ++weightPtr;
-            }
-            convWt.values = val;
-            if (bias != 0) {
-                val = new float[filters];
-                for (int i = 0; i < filters; ++i) {
-                    val[i] = weights[weightPtr];
-                    ++weightPtr;
-                }
-                convBias.values = val;
-            }
-            for (int i = 0; i < filters; ++i) {
-                bnWeights.push_back(weights[weightPtr]);
-                ++weightPtr;
-            }
-            for (int i = 0; i < filters; ++i) {
-                bnBiases.push_back(weights[weightPtr]);
-                ++weightPtr;
-            }
-            for (int i = 0; i < filters; ++i) {
-                bnRunningMean.push_back(weights[weightPtr]);
-                ++weightPtr;
-            }
-            for (int i = 0; i < filters; ++i) {
-                bnRunningVar.push_back(sqrt(weights[weightPtr] + eps));
-                ++weightPtr;
-            }
-            trtWeights.push_back(convWt);
-            if (bias != 0)
-                trtWeights.push_back(convBias);
-        }
-    }
+    if (batchNormalize == false) {
+        float* val;
+        if (bias != 0) {
+            val = new float[filters];
+            for (int i = 0; i < filters; ++i) {
+                val[i] = weights[weightPtr];
+                ++weightPtr;
+            }
+            convBias.values = val;
+            trtWeights.push_back(convBias);
+        }
+        val = new float[size];
+        for (int i = 0; i < size; ++i) {
+            val[i] = weights[weightPtr];
+            ++weightPtr;
+        }
+        convWt.values = val;
+        trtWeights.push_back(convWt);
+    }
+    else {
+        for (int i = 0; i < filters; ++i) {
+            bnBiases.push_back(weights[weightPtr]);
+            ++weightPtr;
+        }
+        for (int i = 0; i < filters; ++i) {
+            bnWeights.push_back(weights[weightPtr]);
+            ++weightPtr;
+        }
+        for (int i = 0; i < filters; ++i) {
+            bnRunningMean.push_back(weights[weightPtr]);
+            ++weightPtr;
+        }
+        for (int i = 0; i < filters; ++i) {
+            bnRunningVar.push_back(sqrt(weights[weightPtr] + 1.0e-5));
+            ++weightPtr;
+        }
+        float* val;
+        if (bias != 0) {
+            val = new float[filters];
+            for (int i = 0; i < filters; ++i) {
+                val[i] = weights[weightPtr];
+                ++weightPtr;
+            }
+            convBias.values = val;
+        }
+        val = new float[size];
+        for (int i = 0; i < size; ++i) {
+            val[i] = weights[weightPtr];
+            ++weightPtr;
+        }
+        convWt.values = val;
+        trtWeights.push_back(convWt);
+        if (bias != 0)
+            trtWeights.push_back(convBias);
+    }
     nvinfer1::IConvolutionLayer* conv = network->addConvolutionNd(*input, filters, nvinfer1::Dims{2, {kernelSize, kernelSize}},
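
Note: after this change the loader consumes the flat darknet .weights stream in one fixed order per conv block: [conv bias] then kernels when there is no batch norm, or [bn biases][bn weights][bn running mean][bn running var] then kernels when there is. A standalone sketch of the float count consumed per block (illustrative sizes; grouped convolutions and an extra conv bias alongside batch norm are ignored for brevity):

#include <cstdio>

// How many floats the loader above pulls from the weights vector per block.
int main()
{
    int filters = 32, inputChannels = 16, kernelSize = 3; // illustrative
    bool batchNormalize = true;
    int size = filters * inputChannels * kernelSize * kernelSize; // conv kernels
    int consumed = batchNormalize
        ? 4 * filters + size  // bnBiases, bnWeights, bnRunningMean, bnRunningVar, kernels
        : filters + size;     // conv bias, then kernels
    printf("floats consumed: %d\n", consumed);
    return 0;
}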

File: convolutional_layer.h

@@ -14,7 +14,7 @@
#include "activation_layer.h"
nvinfer1::ITensor* convolutionalLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, int& inputChannels, float eps,
nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network, std::string layerName = "");
std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, int& inputChannels, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network, std::string layerName = "");
#endif

File: deconvolutional_layer.cpp

@@ -9,8 +9,8 @@
 nvinfer1::ITensor*
 deconvolutionalLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
-    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, int& inputChannels,
-    nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network, std::string layerName)
+    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, int& inputChannels, nvinfer1::ITensor* input,
+    nvinfer1::INetworkDefinition* network, std::string layerName)
 {
     nvinfer1::ITensor* output;
@@ -47,43 +47,23 @@ deconvolutionalLayer(int layerIdx, std::map<std::string, std::string>& block, st
     nvinfer1::Weights convWt {nvinfer1::DataType::kFLOAT, nullptr, size};
     nvinfer1::Weights convBias {nvinfer1::DataType::kFLOAT, nullptr, bias};
-    if (weightsType == "weights") {
-        float* val;
-        if (bias != 0) {
-            val = new float[filters];
-            for (int i = 0; i < filters; ++i) {
-                val[i] = weights[weightPtr];
-                ++weightPtr;
-            }
-            convBias.values = val;
-            trtWeights.push_back(convBias);
-        }
-        val = new float[size];
-        for (int i = 0; i < size; ++i) {
-            val[i] = weights[weightPtr];
-            ++weightPtr;
-        }
-        convWt.values = val;
-        trtWeights.push_back(convWt);
-    }
-    else {
-        float* val = new float[size];
-        for (int i = 0; i < size; ++i) {
-            val[i] = weights[weightPtr];
-            ++weightPtr;
-        }
-        convWt.values = val;
-        trtWeights.push_back(convWt);
-        if (bias != 0) {
-            val = new float[filters];
-            for (int i = 0; i < filters; ++i) {
-                val[i] = weights[weightPtr];
-                ++weightPtr;
-            }
-            convBias.values = val;
-            trtWeights.push_back(convBias);
-        }
-    }
+    float* val;
+    if (bias != 0) {
+        val = new float[filters];
+        for (int i = 0; i < filters; ++i) {
+            val[i] = weights[weightPtr];
+            ++weightPtr;
+        }
+        convBias.values = val;
+        trtWeights.push_back(convBias);
+    }
+    val = new float[size];
+    for (int i = 0; i < size; ++i) {
+        val[i] = weights[weightPtr];
+        ++weightPtr;
+    }
+    convWt.values = val;
+    trtWeights.push_back(convWt);
     nvinfer1::IDeconvolutionLayer* conv = network->addDeconvolutionNd(*input, filters,
         nvinfer1::Dims{2, {kernelSize, kernelSize}}, convWt, convBias);

File: deconvolutional_layer.h

@@ -12,7 +12,7 @@
#include "NvInfer.h"
nvinfer1::ITensor* deconvolutionalLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, std::string weightsType, int& inputChannels,
nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network, std::string layerName = "");
std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, int& inputChannels, nvinfer1::ITensor* input,
nvinfer1::INetworkDefinition* network, std::string layerName = "");
#endif

File: detect_v8_layer.cpp (deleted)

@@ -1,196 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "detect_v8_layer.h"

#include <cassert>

nvinfer1::ITensor*
detectV8Layer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;
    assert(block.at("type") == "detect_v8");
    assert(block.find("num") != block.end());
    assert(block.find("classes") != block.end());

    int num = std::stoi(block.at("num"));
    int classes = std::stoi(block.at("classes"));
    int reg_max = num / 4;

    nvinfer1::Dims inputDims = input->getDimensions();

    nvinfer1::ISliceLayer* sliceBox = network->addSlice(*input, nvinfer1::Dims{2, {0, 0}},
        nvinfer1::Dims{2, {num, inputDims.d[1]}}, nvinfer1::Dims{2, {1, 1}});
    assert(sliceBox != nullptr);
    std::string sliceBoxLayerName = "slice_box_" + std::to_string(layerIdx);
    sliceBox->setName(sliceBoxLayerName.c_str());
    nvinfer1::ITensor* box = sliceBox->getOutput(0);

    nvinfer1::ISliceLayer* sliceCls = network->addSlice(*input, nvinfer1::Dims{2, {num, 0}},
        nvinfer1::Dims{2, {classes, inputDims.d[1]}}, nvinfer1::Dims{2, {1, 1}});
    assert(sliceCls != nullptr);
    std::string sliceClsLayerName = "slice_cls_" + std::to_string(layerIdx);
    sliceCls->setName(sliceClsLayerName.c_str());
    nvinfer1::ITensor* cls = sliceCls->getOutput(0);

    nvinfer1::IShuffleLayer* shuffle1Box = network->addShuffle(*box);
    assert(shuffle1Box != nullptr);
    std::string shuffle1BoxLayerName = "shuffle1_box_" + std::to_string(layerIdx);
    shuffle1Box->setName(shuffle1BoxLayerName.c_str());
    nvinfer1::Dims reshape1Dims = {3, {4, reg_max, inputDims.d[1]}};
    shuffle1Box->setReshapeDimensions(reshape1Dims);
    nvinfer1::Permutation permutation1Box;
    permutation1Box.order[0] = 1;
    permutation1Box.order[1] = 0;
    permutation1Box.order[2] = 2;
    shuffle1Box->setSecondTranspose(permutation1Box);
    box = shuffle1Box->getOutput(0);

    nvinfer1::ISoftMaxLayer* softmax = network->addSoftMax(*box);
    assert(softmax != nullptr);
    std::string softmaxLayerName = "softmax_box_" + std::to_string(layerIdx);
    softmax->setName(softmaxLayerName.c_str());
    softmax->setAxes(1 << 0);
    box = softmax->getOutput(0);

    nvinfer1::Weights dflWt {nvinfer1::DataType::kFLOAT, nullptr, reg_max};
    float* val = new float[reg_max];
    for (int i = 0; i < reg_max; ++i) {
        val[i] = i;
    }
    dflWt.values = val;
    nvinfer1::IConvolutionLayer* conv = network->addConvolutionNd(*box, 1, nvinfer1::Dims{2, {1, 1}}, dflWt,
        nvinfer1::Weights{});
    assert(conv != nullptr);
    std::string convLayerName = "conv_box_" + std::to_string(layerIdx);
    conv->setName(convLayerName.c_str());
    conv->setStrideNd(nvinfer1::Dims{2, {1, 1}});
    conv->setPaddingNd(nvinfer1::Dims{2, {0, 0}});
    box = conv->getOutput(0);

    nvinfer1::IShuffleLayer* shuffle2Box = network->addShuffle(*box);
    assert(shuffle2Box != nullptr);
    std::string shuffle2BoxLayerName = "shuffle2_box_" + std::to_string(layerIdx);
    shuffle2Box->setName(shuffle2BoxLayerName.c_str());
    nvinfer1::Dims reshape2Dims = {2, {4, inputDims.d[1]}};
    shuffle2Box->setReshapeDimensions(reshape2Dims);
    box = shuffle2Box->getOutput(0);

    nvinfer1::Dims shuffle2BoxDims = box->getDimensions();

    nvinfer1::ISliceLayer* sliceLtBox = network->addSlice(*box, nvinfer1::Dims{2, {0, 0}},
        nvinfer1::Dims{2, {2, shuffle2BoxDims.d[1]}}, nvinfer1::Dims{2, {1, 1}});
    assert(sliceLtBox != nullptr);
    std::string sliceLtBoxLayerName = "slice_lt_box_" + std::to_string(layerIdx);
    sliceLtBox->setName(sliceLtBoxLayerName.c_str());
    nvinfer1::ITensor* lt = sliceLtBox->getOutput(0);

    nvinfer1::ISliceLayer* sliceRbBox = network->addSlice(*box, nvinfer1::Dims{2, {2, 0}},
        nvinfer1::Dims{2, {2, shuffle2BoxDims.d[1]}}, nvinfer1::Dims{2, {1, 1}});
    assert(sliceRbBox != nullptr);
    std::string sliceRbBoxLayerName = "slice_rb_box_" + std::to_string(layerIdx);
    sliceRbBox->setName(sliceRbBoxLayerName.c_str());
    nvinfer1::ITensor* rb = sliceRbBox->getOutput(0);

    int channels = 2 * shuffle2BoxDims.d[1];
    nvinfer1::Weights anchorPointsWt {nvinfer1::DataType::kFLOAT, nullptr, channels};
    val = new float[channels];
    for (int i = 0; i < channels; ++i) {
        val[i] = weights[weightPtr];
        ++weightPtr;
    }
    anchorPointsWt.values = val;
    trtWeights.push_back(anchorPointsWt);
    nvinfer1::IConstantLayer* anchorPoints = network->addConstant(nvinfer1::Dims{2, {2, shuffle2BoxDims.d[1]}},
        anchorPointsWt);
    assert(anchorPoints != nullptr);
    std::string anchorPointsLayerName = "anchor_points_" + std::to_string(layerIdx);
    anchorPoints->setName(anchorPointsLayerName.c_str());
    nvinfer1::ITensor* anchorPointsTensor = anchorPoints->getOutput(0);

    nvinfer1::IElementWiseLayer* x1y1 = network->addElementWise(*anchorPointsTensor, *lt,
        nvinfer1::ElementWiseOperation::kSUB);
    assert(x1y1 != nullptr);
    std::string x1y1LayerName = "x1y1_" + std::to_string(layerIdx);
    x1y1->setName(x1y1LayerName.c_str());
    nvinfer1::ITensor* x1y1Tensor = x1y1->getOutput(0);

    nvinfer1::IElementWiseLayer* x2y2 = network->addElementWise(*rb, *anchorPointsTensor,
        nvinfer1::ElementWiseOperation::kSUM);
    assert(x2y2 != nullptr);
    std::string x2y2LayerName = "x2y2_" + std::to_string(layerIdx);
    x2y2->setName(x2y2LayerName.c_str());
    nvinfer1::ITensor* x2y2Tensor = x2y2->getOutput(0);

    std::vector<nvinfer1::ITensor*> concatBoxInputs;
    concatBoxInputs.push_back(x1y1Tensor);
    concatBoxInputs.push_back(x2y2Tensor);
    nvinfer1::IConcatenationLayer* concatBox = network->addConcatenation(concatBoxInputs.data(), concatBoxInputs.size());
    assert(concatBox != nullptr);
    std::string concatBoxLayerName = "concat_box_" + std::to_string(layerIdx);
    concatBox->setName(concatBoxLayerName.c_str());
    concatBox->setAxis(0);
    box = concatBox->getOutput(0);

    channels = shuffle2BoxDims.d[1];
    nvinfer1::Weights stridePointsWt {nvinfer1::DataType::kFLOAT, nullptr, channels};
    val = new float[channels];
    for (int i = 0; i < channels; ++i) {
        val[i] = weights[weightPtr];
        ++weightPtr;
    }
    stridePointsWt.values = val;
    trtWeights.push_back(stridePointsWt);
    nvinfer1::IConstantLayer* stridePoints = network->addConstant(nvinfer1::Dims{2, {1, shuffle2BoxDims.d[1]}},
        stridePointsWt);
    assert(stridePoints != nullptr);
    std::string stridePointsLayerName = "stride_points_" + std::to_string(layerIdx);
    stridePoints->setName(stridePointsLayerName.c_str());
    nvinfer1::ITensor* stridePointsTensor = stridePoints->getOutput(0);

    nvinfer1::IElementWiseLayer* pred = network->addElementWise(*box, *stridePointsTensor,
        nvinfer1::ElementWiseOperation::kPROD);
    assert(pred != nullptr);
    std::string predLayerName = "pred_" + std::to_string(layerIdx);
    pred->setName(predLayerName.c_str());
    box = pred->getOutput(0);

    nvinfer1::IActivationLayer* sigmoid = network->addActivation(*cls, nvinfer1::ActivationType::kSIGMOID);
    assert(sigmoid != nullptr);
    std::string sigmoidLayerName = "sigmoid_cls_" + std::to_string(layerIdx);
    sigmoid->setName(sigmoidLayerName.c_str());
    cls = sigmoid->getOutput(0);

    std::vector<nvinfer1::ITensor*> concatInputs;
    concatInputs.push_back(box);
    concatInputs.push_back(cls);
    nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
    assert(concat != nullptr);
    std::string concatLayerName = "concat_" + std::to_string(layerIdx);
    concat->setName(concatLayerName.c_str());
    concat->setAxis(0);
    output = concat->getOutput(0);

    nvinfer1::IShuffleLayer* shuffle = network->addShuffle(*output);
    assert(shuffle != nullptr);
    std::string shuffleLayerName = "shuffle_" + std::to_string(layerIdx);
    shuffle->setName(shuffleLayerName.c_str());
    nvinfer1::Permutation permutation;
    permutation.order[0] = 1;
    permutation.order[1] = 0;
    shuffle->setFirstTranspose(permutation);
    output = shuffle->getOutput(0);

    return output;
}
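
Note: in the removed detectV8Layer, the conv_box convolution has fixed weights {0, 1, ..., reg_max-1} and runs on the softmax output, so per anchor it computes the expected bin index, i.e. the DFL distance decode. A standalone scalar sketch with a made-up distribution:

#include <cmath>
#include <cstdio>

// sum_i(i * softmax(logits)_i): what the fixed-weight 1x1 conv above computes.
int main()
{
    const int regMax = 16;
    float logits[regMax], denom = 0.0f;
    for (int i = 0; i < regMax; ++i) {
        logits[i] = (i == 5) ? 4.0f : 0.0f; // toy distribution peaked at bin 5
        denom += expf(logits[i]);
    }
    float expected = 0.0f;
    for (int i = 0; i < regMax; ++i) {
        float p = expf(logits[i]) / denom; // softmax over the reg_max bins
        expected += i * p;                 // expectation = decoded distance
    }
    printf("decoded distance: %f\n", expected); // close to 5
    return 0;
}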

File: detect_v8_layer.h (deleted)

@@ -1,18 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __DETECT_V8_LAYER_H__
#define __DETECT_V8_LAYER_H__

#include <map>
#include <vector>

#include "NvInfer.h"

nvinfer1::ITensor* detectV8Layer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif

File: implicit_layer.cpp

@@ -13,7 +13,7 @@ implicitLayer(int layerIdx, std::map<std::string, std::string>& block, std::vect
 {
     nvinfer1::ITensor* output;
-    assert(block.at("type") == "implicit_add" || block.at("type") == "implicit_mul");
+    assert(block.at("type") == "implicit" || block.at("type") == "implicit_add" || block.at("type") == "implicit_mul");
     assert(block.find("filters") != block.end());
     int filters = std::stoi(block.at("filters"));

File: pooling_layer.cpp

@@ -14,9 +14,10 @@ poolingLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::
 {
     nvinfer1::ITensor* output;
-    assert(block.at("type") == "maxpool" || block.at("type") == "avgpool");
+    assert(block.at("type") == "max" || block.at("type") == "maxpool" || block.at("type") == "avg" ||
+        block.at("type") == "avgpool");
-    if (block.at("type") == "maxpool") {
+    if (block.at("type") == "max" || block.at("type") == "maxpool") {
         assert(block.find("size") != block.end());
         assert(block.find("stride") != block.end());
@@ -36,7 +37,7 @@ poolingLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::
         }
         output = maxpool->getOutput(0);
     }
-    else if (block.at("type") == "avgpool") {
+    else if (block.at("type") == "avg" || block.at("type") == "avgpool") {
         nvinfer1::Dims inputDims = input->getDimensions();
         nvinfer1::IPoolingLayer* avgpool = network->addPoolingNd(*input, nvinfer1::PoolingType::kAVERAGE,
             nvinfer1::Dims{2, {inputDims.d[1], inputDims.d[2]}});

File: reduce_layer.cpp (deleted)

@@ -1,54 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "reduce_layer.h"

nvinfer1::ITensor*
reduceLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;
    assert(block.at("type") == "reduce");
    assert(block.find("mode") != block.end());
    assert(block.find("axes") != block.end());

    std::string mode = block.at("mode");
    nvinfer1::ReduceOperation operation;
    if (mode == "mean")
        operation = nvinfer1::ReduceOperation::kAVG;

    std::string strAxes = block.at("axes");
    std::vector<int32_t> axes;
    size_t lastPos = 0, pos = 0;
    while ((pos = strAxes.find(',', lastPos)) != std::string::npos) {
        int vL = std::stoi(trim(strAxes.substr(lastPos, pos - lastPos)));
        axes.push_back(vL);
        lastPos = pos + 1;
    }
    if (lastPos < strAxes.length()) {
        std::string lastV = trim(strAxes.substr(lastPos));
        if (!lastV.empty())
            axes.push_back(std::stoi(lastV));
    }
    assert(!axes.empty());

    uint32_t axisMask = 0;
    for (int axis : axes)
        axisMask |= 1 << axis;

    bool keepDims = false;
    if (block.find("keep") != block.end())
        keepDims = std::stoi(block.at("keep")) == 1 ? true : false;

    nvinfer1::IReduceLayer* reduce = network->addReduce(*input, operation, axisMask, keepDims);
    assert(reduce != nullptr);
    std::string reduceLayerName = "reduce_" + std::to_string(layerIdx);
    reduce->setName(reduceLayerName.c_str());
    output = reduce->getOutput(0);

    return output;
}
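
Note: addReduce takes a bit mask rather than an axis list, which is why the parsed axes are folded together with 1 << axis. A standalone sketch of what "axes=1,2" produces:

#include <cstdint>
#include <cstdio>

// Each listed axis sets one bit; "axes=1,2" on a CHW tensor reduces H and W.
int main()
{
    int axes[] = {1, 2};
    uint32_t axisMask = 0;
    for (int axis : axes)
        axisMask |= 1u << axis;
    printf("axisMask = 0x%x\n", axisMask); // 0x6
    return 0;
}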

File: reduce_layer.h (deleted)

@@ -1,14 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __REDUCE_LAYER_H__
#define __REDUCE_LAYER_H__

#include "../utils.h"

nvinfer1::ITensor* reduceLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif

File: reg_layer.cpp (deleted)

@@ -1,109 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "reg_layer.h"

#include <cassert>

nvinfer1::ITensor*
regLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;
    assert(block.at("type") == "reg");

    nvinfer1::IShuffleLayer* shuffle = network->addShuffle(*input);
    assert(shuffle != nullptr);
    std::string shuffleLayerName = "shuffle_" + std::to_string(layerIdx);
    shuffle->setName(shuffleLayerName.c_str());
    nvinfer1::Permutation permutation;
    permutation.order[0] = 1;
    permutation.order[1] = 0;
    shuffle->setFirstTranspose(permutation);
    output = shuffle->getOutput(0);

    nvinfer1::Dims shuffleDims = output->getDimensions();

    nvinfer1::ISliceLayer* sliceLt = network->addSlice(*output, nvinfer1::Dims{2, {0, 0}},
        nvinfer1::Dims{2, {shuffleDims.d[0], 2}}, nvinfer1::Dims{2, {1, 1}});
    assert(sliceLt != nullptr);
    std::string sliceLtLayerName = "slice_lt_" + std::to_string(layerIdx);
    sliceLt->setName(sliceLtLayerName.c_str());
    nvinfer1::ITensor* lt = sliceLt->getOutput(0);

    nvinfer1::ISliceLayer* sliceRb = network->addSlice(*output, nvinfer1::Dims{2, {0, 2}},
        nvinfer1::Dims{2, {shuffleDims.d[0], 2}}, nvinfer1::Dims{2, {1, 1}});
    assert(sliceRb != nullptr);
    std::string sliceRbLayerName = "slice_rb_" + std::to_string(layerIdx);
    sliceRb->setName(sliceRbLayerName.c_str());
    nvinfer1::ITensor* rb = sliceRb->getOutput(0);

    int channels = shuffleDims.d[0] * 2;
    nvinfer1::Weights anchorPointsWt {nvinfer1::DataType::kFLOAT, nullptr, channels};
    float* val = new float[channels];
    for (int i = 0; i < channels; ++i) {
        val[i] = weights[weightPtr];
        ++weightPtr;
    }
    anchorPointsWt.values = val;
    trtWeights.push_back(anchorPointsWt);
    nvinfer1::IConstantLayer* anchorPoints = network->addConstant(nvinfer1::Dims{2, {shuffleDims.d[0], 2}}, anchorPointsWt);
    assert(anchorPoints != nullptr);
    std::string anchorPointsLayerName = "anchor_points_" + std::to_string(layerIdx);
    anchorPoints->setName(anchorPointsLayerName.c_str());
    nvinfer1::ITensor* anchorPointsTensor = anchorPoints->getOutput(0);

    nvinfer1::IElementWiseLayer* x1y1 = network->addElementWise(*anchorPointsTensor, *lt,
        nvinfer1::ElementWiseOperation::kSUB);
    assert(x1y1 != nullptr);
    std::string x1y1LayerName = "x1y1_" + std::to_string(layerIdx);
    x1y1->setName(x1y1LayerName.c_str());
    nvinfer1::ITensor* x1y1Tensor = x1y1->getOutput(0);

    nvinfer1::IElementWiseLayer* x2y2 = network->addElementWise(*rb, *anchorPointsTensor,
        nvinfer1::ElementWiseOperation::kSUM);
    assert(x2y2 != nullptr);
    std::string x2y2LayerName = "x2y2_" + std::to_string(layerIdx);
    x2y2->setName(x2y2LayerName.c_str());
    nvinfer1::ITensor* x2y2Tensor = x2y2->getOutput(0);

    std::vector<nvinfer1::ITensor*> concatInputs;
    concatInputs.push_back(x1y1Tensor);
    concatInputs.push_back(x2y2Tensor);
    nvinfer1::IConcatenationLayer* concat = network->addConcatenation(concatInputs.data(), concatInputs.size());
    assert(concat != nullptr);
    std::string concatLayerName = "concat_" + std::to_string(layerIdx);
    concat->setName(concatLayerName.c_str());
    concat->setAxis(1);
    output = concat->getOutput(0);

    channels = shuffleDims.d[0];
    nvinfer1::Weights stridePointsWt {nvinfer1::DataType::kFLOAT, nullptr, channels};
    val = new float[channels];
    for (int i = 0; i < channels; ++i) {
        val[i] = weights[weightPtr];
        ++weightPtr;
    }
    stridePointsWt.values = val;
    trtWeights.push_back(stridePointsWt);
    nvinfer1::IConstantLayer* stridePoints = network->addConstant(nvinfer1::Dims{2, {shuffleDims.d[0], 1}}, stridePointsWt);
    assert(stridePoints != nullptr);
    std::string stridePointsLayerName = "stride_points_" + std::to_string(layerIdx);
    stridePoints->setName(stridePointsLayerName.c_str());
    nvinfer1::ITensor* stridePointsTensor = stridePoints->getOutput(0);

    nvinfer1::IElementWiseLayer* pred = network->addElementWise(*output, *stridePointsTensor,
        nvinfer1::ElementWiseOperation::kPROD);
    assert(pred != nullptr);
    std::string predLayerName = "pred_" + std::to_string(layerIdx);
    pred->setName(predLayerName.c_str());
    output = pred->getOutput(0);

    return output;
}
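
Note: the decode wired up in the removed regLayer is x1y1 = anchor - lt and x2y2 = anchor + rb, with the result then multiplied by the per-row stride constant. A standalone numeric walk-through with made-up values:

#include <cstdio>

// For one anchor point: corners from the predicted lt/rb distances, then
// everything scaled by the stride of the feature map the anchor lives on.
int main()
{
    float anchorX = 10.5f, anchorY = 6.5f; // grid-space anchor point
    float lt[2] = {1.25f, 0.75f};          // predicted left/top distances
    float rb[2] = {2.0f, 1.5f};            // predicted right/bottom distances
    float stride = 8.0f;
    float x1 = (anchorX - lt[0]) * stride, y1 = (anchorY - lt[1]) * stride;
    float x2 = (anchorX + rb[0]) * stride, y2 = (anchorY + rb[1]) * stride;
    printf("box: (%g, %g) .. (%g, %g)\n", x1, y1, x2, y2);
    return 0;
}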

File: reg_layer.h (deleted)

@@ -1,18 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __REG_LAYER_H__
#define __REG_LAYER_H__

#include <map>
#include <vector>

#include "NvInfer.h"

nvinfer1::ITensor* regLayer(int layerIdx, std::map<std::string, std::string>& block, std::vector<float>& weights,
    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif

File: reorg_layer.cpp

@@ -14,7 +14,7 @@ reorgLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::IT
 {
     nvinfer1::ITensor* output;
-    assert(block.at("type") == "reorg");
+    assert(block.at("type") == "reorg3d");
     nvinfer1::Dims inputDims = input->getDimensions();

File: sam_layer.cpp (new)

@@ -0,0 +1,28 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "sam_layer.h"

#include <cassert>

nvinfer1::ITensor*
samLayer(int layerIdx, std::string activation, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    nvinfer1::ITensor* samInput, nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;
    assert(block.at("type") == "sam");

    nvinfer1::IElementWiseLayer* sam = network->addElementWise(*input, *samInput, nvinfer1::ElementWiseOperation::kPROD);
    assert(sam != nullptr);
    std::string samLayerName = "sam_" + std::to_string(layerIdx);
    sam->setName(samLayerName.c_str());
    output = sam->getOutput(0);

    output = activationLayer(layerIdx, activation, output, network);
    assert(output != nullptr);

    return output;
}
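
Note: the new samLayer computes activation(input * samInput) element-wise, i.e. the feature map gated by an attention map. A standalone sketch of the gating step (activation omitted for brevity; the real layer defers to activationLayer):

#include <cstdio>

// The kPROD element-wise gate from samLayer, on toy 1-D arrays.
int main()
{
    float feature[4] = {1.0f, -2.0f, 3.0f, 0.5f};
    float attention[4] = {0.9f, 0.1f, 0.5f, 1.0f}; // e.g. a sigmoid output
    for (int i = 0; i < 4; ++i)
        printf("%f ", feature[i] * attention[i]);
    printf("\n");
    return 0;
}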

File: sam_layer.h (new)

@@ -0,0 +1,18 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __SAM_LAYER_H__
#define __SAM_LAYER_H__

#include <map>

#include "NvInfer.h"

#include "activation_layer.h"

nvinfer1::ITensor* samLayer(int layerIdx, std::string activation, std::map<std::string, std::string>& block,
    nvinfer1::ITensor* input, nvinfer1::ITensor* samInput, nvinfer1::INetworkDefinition* network);

#endif

File: shortcut_layer.cpp

@@ -8,7 +8,7 @@
 #include <cassert>
 nvinfer1::ITensor*
-shortcutLayer(int layerIdx, std::string mode, std::string activation, std::string inputVol, std::string shortcutVol,
+shortcutLayer(int layerIdx, std::string activation, std::string inputVol, std::string shortcutVol,
     std::map<std::string, std::string>& block, nvinfer1::ITensor* input, nvinfer1::ITensor* shortcutInput,
     nvinfer1::INetworkDefinition* network)
 {
@@ -16,12 +16,7 @@ shortcutLayer(int layerIdx, std::string mode, std::string activation, std::strin
     assert(block.at("type") == "shortcut");
-    nvinfer1::ElementWiseOperation operation = nvinfer1::ElementWiseOperation::kSUM;
-    if (mode == "mul")
-        operation = nvinfer1::ElementWiseOperation::kPROD;
-    if (mode == "add" && inputVol != shortcutVol) {
+    if (inputVol != shortcutVol) {
         nvinfer1::ISliceLayer* slice = network->addSlice(*shortcutInput, nvinfer1::Dims{3, {0, 0, 0}}, input->getDimensions(),
             nvinfer1::Dims{3, {1, 1, 1}});
         assert(slice != nullptr);
@@ -32,7 +27,7 @@ shortcutLayer(int layerIdx, std::string mode, std::string activation, std::strin
     else
         output = shortcutInput;
-    nvinfer1::IElementWiseLayer* shortcut = network->addElementWise(*input, *output, operation);
+    nvinfer1::IElementWiseLayer* shortcut = network->addElementWise(*input, *output, nvinfer1::ElementWiseOperation::kSUM);
     assert(shortcut != nullptr);
     std::string shortcutLayerName = "shortcut_" + std::to_string(layerIdx);
     shortcut->setName(shortcutLayerName.c_str());

File: shortcut_layer.h

@@ -12,8 +12,8 @@
#include "activation_layer.h"
nvinfer1::ITensor* shortcutLayer(int layerIdx, std::string mode, std::string activation, std::string inputVol,
std::string shortcutVol, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
nvinfer1::ITensor* shortcut, nvinfer1::INetworkDefinition* network);
nvinfer1::ITensor* shortcutLayer(int layerIdx, std::string activation, std::string inputVol, std::string shortcutVol,
std::map<std::string, std::string>& block, nvinfer1::ITensor* input, nvinfer1::ITensor* shortcut,
nvinfer1::INetworkDefinition* network);
#endif

File: shuffle_layer.cpp (deleted)

@@ -1,128 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "shuffle_layer.h"

nvinfer1::ITensor*
shuffleLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    std::vector<nvinfer1::ITensor*> tensorOutputs, nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;
    assert(block.at("type") == "shuffle");

    nvinfer1::IShuffleLayer* shuffle = network->addShuffle(*input);
    assert(shuffle != nullptr);
    std::string shuffleLayerName = "shuffle_" + std::to_string(layerIdx);
    shuffle->setName(shuffleLayerName.c_str());

    if (block.find("reshape") != block.end()) {
        nvinfer1::Dims inputTensorDims = input->getDimensions();
        std::string strReshape = block.at("reshape");
        std::vector<int32_t> reshape;
        size_t lastPos = 0, pos = 0;
        while ((pos = strReshape.find(',', lastPos)) != std::string::npos) {
            std::string V = trim(strReshape.substr(lastPos, pos - lastPos));
            if (V == "c")
                reshape.push_back(inputTensorDims.d[0]);
            else if (V == "ch")
                reshape.push_back(inputTensorDims.d[0] * inputTensorDims.d[1]);
            else if (V == "cw")
                reshape.push_back(inputTensorDims.d[0] * inputTensorDims.d[2]);
            else if (V == "h")
                reshape.push_back(inputTensorDims.d[1]);
            else if (V == "hw")
                reshape.push_back(inputTensorDims.d[1] * inputTensorDims.d[2]);
            else if (V == "w")
                reshape.push_back(inputTensorDims.d[2]);
            else if (V == "chw")
                reshape.push_back(inputTensorDims.d[0] * inputTensorDims.d[1] * inputTensorDims.d[2]);
            else
                reshape.push_back(std::stoi(V));
            lastPos = pos + 1;
        }
        if (lastPos < strReshape.length()) {
            std::string lastV = trim(strReshape.substr(lastPos));
            if (!lastV.empty()) {
                if (lastV == "c")
                    reshape.push_back(inputTensorDims.d[0]);
                else if (lastV == "ch")
                    reshape.push_back(inputTensorDims.d[0] * inputTensorDims.d[1]);
                else if (lastV == "cw")
                    reshape.push_back(inputTensorDims.d[0] * inputTensorDims.d[2]);
                else if (lastV == "h")
                    reshape.push_back(inputTensorDims.d[1]);
                else if (lastV == "hw")
                    reshape.push_back(inputTensorDims.d[1] * inputTensorDims.d[2]);
                else if (lastV == "w")
                    reshape.push_back(inputTensorDims.d[2]);
                else if (lastV == "chw")
                    reshape.push_back(inputTensorDims.d[0] * inputTensorDims.d[1] * inputTensorDims.d[2]);
                else
                    reshape.push_back(std::stoi(lastV));
            }
        }
        assert(!reshape.empty());
        nvinfer1::Dims reshapeDims;
        reshapeDims.nbDims = reshape.size();
        for (uint i = 0; i < reshape.size(); ++i)
            reshapeDims.d[i] = reshape[i];
        shuffle->setReshapeDimensions(reshapeDims);
    }

    if (block.find("transpose1") != block.end()) {
        std::string strTranspose1 = block.at("transpose1");
        std::vector<int32_t> transpose1;
        size_t lastPos = 0, pos = 0;
        while ((pos = strTranspose1.find(',', lastPos)) != std::string::npos) {
            int vL = std::stoi(trim(strTranspose1.substr(lastPos, pos - lastPos)));
            transpose1.push_back(vL);
            lastPos = pos + 1;
        }
        if (lastPos < strTranspose1.length()) {
            std::string lastV = trim(strTranspose1.substr(lastPos));
            if (!lastV.empty())
                transpose1.push_back(std::stoi(lastV));
        }
        assert(!transpose1.empty());
        nvinfer1::Permutation permutation1;
        for (uint i = 0; i < transpose1.size(); ++i)
            permutation1.order[i] = transpose1[i];
        shuffle->setFirstTranspose(permutation1);
    }

    if (block.find("transpose2") != block.end()) {
        std::string strTranspose2 = block.at("transpose2");
        std::vector<int32_t> transpose2;
        size_t lastPos = 0, pos = 0;
        while ((pos = strTranspose2.find(',', lastPos)) != std::string::npos) {
            int vL = std::stoi(trim(strTranspose2.substr(lastPos, pos - lastPos)));
            transpose2.push_back(vL);
            lastPos = pos + 1;
        }
        if (lastPos < strTranspose2.length()) {
            std::string lastV = trim(strTranspose2.substr(lastPos));
            if (!lastV.empty())
                transpose2.push_back(std::stoi(lastV));
        }
        assert(!transpose2.empty());
        nvinfer1::Permutation permutation2;
        for (uint i = 0; i < transpose2.size(); ++i)
            permutation2.order[i] = transpose2[i];
        shuffle->setSecondTranspose(permutation2);
    }

    output = shuffle->getOutput(0);

    return output;
}
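
Note: the symbolic reshape tokens parsed by the removed shuffleLayer stand for input dimensions, so a cfg entry like reshape=c,hw (an illustrative value, not taken from this commit) flattens H and W into one axis. A standalone sketch of the resulting shape:

#include <cstdio>

// Effect of reshape=c,hw on a 3x52x52 input: "c" keeps the channel dim,
// "hw" collapses height and width into a single axis.
int main()
{
    int c = 3, h = 52, w = 52;
    printf("reshape=c,hw -> {%d, %d}\n", c, h * w); // {3, 2704}
    return 0;
}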

File: shuffle_layer.h (deleted)

@@ -1,14 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __SHUFFLE_LAYER_H__
#define __SHUFFLE_LAYER_H__

#include "../utils.h"

nvinfer1::ITensor* shuffleLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    std::vector<nvinfer1::ITensor*> tensorOutputs, nvinfer1::INetworkDefinition* network);

#endif

File: softmax_layer.cpp (deleted)

@@ -1,29 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#include "softmax_layer.h"

#include <cassert>

nvinfer1::ITensor*
softmaxLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network)
{
    nvinfer1::ITensor* output;
    assert(block.at("type") == "softmax");
    assert(block.find("axes") != block.end());

    int axes = std::stoi(block.at("axes"));

    nvinfer1::ISoftMaxLayer* softmax = network->addSoftMax(*input);
    assert(softmax != nullptr);
    std::string softmaxLayerName = "softmax_" + std::to_string(layerIdx);
    softmax->setName(softmaxLayerName.c_str());
    softmax->setAxes(1 << axes);
    output = softmax->getOutput(0);

    return output;
}

File: softmax_layer.h (deleted)

@@ -1,16 +0,0 @@
/*
 * Created by Marcos Luciano
 * https://www.github.com/marcoslucianops
 */

#ifndef __SOFTMAX_LAYER_H__
#define __SOFTMAX_LAYER_H__

#include <map>

#include "NvInfer.h"

nvinfer1::ITensor* softmaxLayer(int layerIdx, std::map<std::string, std::string>& block, nvinfer1::ITensor* input,
    nvinfer1::INetworkDefinition* network);

#endif