Added YOLOv5 6.0 native support

unknown
2021-12-09 15:44:17 -03:00
parent dcc44b730c
commit bfd9268a31
8 changed files with 688 additions and 80 deletions


@@ -12,7 +12,10 @@ nvinfer1::ILayer* activationLayer(
     nvinfer1::ITensor* input,
     nvinfer1::INetworkDefinition* network)
 {
-    if (activation == "relu")
+    if (activation == "linear") {
+        // Pass
+    }
+    else if (activation == "relu")
     {
         nvinfer1::IActivationLayer* relu = network->addActivation(
             *input, nvinfer1::ActivationType::kRELU);
@@ -78,5 +81,24 @@ nvinfer1::ILayer* activationLayer(
         mish->setName(mishLayerName.c_str());
         output = mish;
     }
+    else if (activation == "silu")
+    {
+        nvinfer1::IActivationLayer* sigmoid = network->addActivation(
+            *input, nvinfer1::ActivationType::kSIGMOID);
+        assert(sigmoid != nullptr);
+        std::string sigmoidLayerName = "sigmoid_" + std::to_string(layerIdx);
+        sigmoid->setName(sigmoidLayerName.c_str());
+        nvinfer1::IElementWiseLayer* silu = network->addElementWise(
+            *sigmoid->getOutput(0), *input,
+            nvinfer1::ElementWiseOperation::kPROD);
+        assert(silu != nullptr);
+        std::string siluLayerName = "silu_" + std::to_string(layerIdx);
+        silu->setName(siluLayerName.c_str());
+        output = silu;
+    }
+    else {
+        std::cerr << "Activation not supported: " << activation << std::endl;
+        std::abort();
+    }
     return output;
 }
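For reference, the new "silu" branch composes SiLU out of two TensorRT layers, a kSIGMOID activation followed by a kPROD elementwise product with the layer input, since TensorRT has no dedicated SiLU activation type. The standalone sketch below (plain C++, not part of this commit; helper names are illustrative) just checks the math the two-layer graph implements: sigmoid(x) * x equals the closed form x / (1 + e^-x) used as the SiLU activation in YOLOv5 6.0 models.

// Standalone sanity check, assuming nothing beyond the C++ standard library.
#include <cassert>
#include <cmath>
#include <cstdio>
#include <initializer_list>

static float sigmoidf(float x) { return 1.0f / (1.0f + std::exp(-x)); }

// SiLU exactly as the TensorRT graph computes it: sigmoid(x) * x.
static float silu(float x) { return sigmoidf(x) * x; }

int main() {
    for (float x : {-4.0f, -1.0f, 0.0f, 1.0f, 4.0f}) {
        float closedForm = x / (1.0f + std::exp(-x));  // textbook SiLU
        assert(std::fabs(silu(x) - closedForm) < 1e-6f);
        std::printf("silu(%+.1f) = %+.6f\n", x, silu(x));
    }
    return 0;
}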


@@ -12,6 +12,7 @@ nvinfer1::ILayer* convolutionalLayer(
     std::vector<float>& weights,
     std::vector<nvinfer1::Weights>& trtWeights,
     int& weightPtr,
+    std::string weightsType,
     int& inputChannels,
     nvinfer1::ITensor* input,
     nvinfer1::INetworkDefinition* network)
@@ -56,57 +57,111 @@ nvinfer1::ILayer* convolutionalLayer(
     nvinfer1::Weights convWt{nvinfer1::DataType::kFLOAT, nullptr, size};
     nvinfer1::Weights convBias{nvinfer1::DataType::kFLOAT, nullptr, bias};
-    if (batchNormalize == false)
-    {
-        float* val = new float[filters];
-        for (int i = 0; i < filters; ++i)
-        {
-            val[i] = weights[weightPtr];
-            weightPtr++;
-        }
-        convBias.values = val;
-        trtWeights.push_back(convBias);
-        val = new float[size];
-        for (int i = 0; i < size; ++i)
-        {
-            val[i] = weights[weightPtr];
-            weightPtr++;
-        }
-        convWt.values = val;
-        trtWeights.push_back(convWt);
-    }
-    else
-    {
-        for (int i = 0; i < filters; ++i)
-        {
-            bnBiases.push_back(weights[weightPtr]);
-            weightPtr++;
-        }
-        for (int i = 0; i < filters; ++i)
-        {
-            bnWeights.push_back(weights[weightPtr]);
-            weightPtr++;
-        }
-        for (int i = 0; i < filters; ++i)
-        {
-            bnRunningMean.push_back(weights[weightPtr]);
-            weightPtr++;
-        }
-        for (int i = 0; i < filters; ++i)
-        {
-            bnRunningVar.push_back(sqrt(weights[weightPtr] + 1.0e-5));
-            weightPtr++;
-        }
-        float* val = new float[size];
-        for (int i = 0; i < size; ++i)
-        {
-            val[i] = weights[weightPtr];
-            weightPtr++;
-        }
-        convWt.values = val;
-        trtWeights.push_back(convWt);
-        trtWeights.push_back(convBias);
-    }
+    if (weightsType == "weights") {
+        if (batchNormalize == false)
+        {
+            float* val = new float[filters];
+            for (int i = 0; i < filters; ++i)
+            {
+                val[i] = weights[weightPtr];
+                weightPtr++;
+            }
+            convBias.values = val;
+            trtWeights.push_back(convBias);
+            val = new float[size];
+            for (int i = 0; i < size; ++i)
+            {
+                val[i] = weights[weightPtr];
+                weightPtr++;
+            }
+            convWt.values = val;
+            trtWeights.push_back(convWt);
+        }
+        else
+        {
+            for (int i = 0; i < filters; ++i)
+            {
+                bnBiases.push_back(weights[weightPtr]);
+                weightPtr++;
+            }
+            for (int i = 0; i < filters; ++i)
+            {
+                bnWeights.push_back(weights[weightPtr]);
+                weightPtr++;
+            }
+            for (int i = 0; i < filters; ++i)
+            {
+                bnRunningMean.push_back(weights[weightPtr]);
+                weightPtr++;
+            }
+            for (int i = 0; i < filters; ++i)
+            {
+                bnRunningVar.push_back(sqrt(weights[weightPtr] + 1.0e-5));
+                weightPtr++;
+            }
+            float* val = new float[size];
+            for (int i = 0; i < size; ++i)
+            {
+                val[i] = weights[weightPtr];
+                weightPtr++;
+            }
+            convWt.values = val;
+            trtWeights.push_back(convWt);
+            trtWeights.push_back(convBias);
+        }
+    }
+    else {
+        if (batchNormalize == false)
+        {
+            float* val = new float[size];
+            for (int i = 0; i < size; ++i)
+            {
+                val[i] = weights[weightPtr];
+                weightPtr++;
+            }
+            convWt.values = val;
+            trtWeights.push_back(convWt);
+            val = new float[filters];
+            for (int i = 0; i < filters; ++i)
+            {
+                val[i] = weights[weightPtr];
+                weightPtr++;
+            }
+            convBias.values = val;
+            trtWeights.push_back(convBias);
+        }
+        else
+        {
+            float* val = new float[size];
+            for (int i = 0; i < size; ++i)
+            {
+                val[i] = weights[weightPtr];
+                weightPtr++;
+            }
+            convWt.values = val;
+            for (int i = 0; i < filters; ++i)
+            {
+                bnWeights.push_back(weights[weightPtr]);
+                weightPtr++;
+            }
+            for (int i = 0; i < filters; ++i)
+            {
+                bnBiases.push_back(weights[weightPtr]);
+                weightPtr++;
+            }
+            for (int i = 0; i < filters; ++i)
+            {
+                bnRunningMean.push_back(weights[weightPtr]);
+                weightPtr++;
+            }
+            for (int i = 0; i < filters; ++i)
+            {
+                bnRunningVar.push_back(sqrt(weights[weightPtr] + 1.0e-5));
+                weightPtr++;
+            }
+            trtWeights.push_back(convWt);
+            trtWeights.push_back(convBias);
+        }
+    }
     nvinfer1::IConvolutionLayer* conv = network->addConvolution(
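The new weightsType parameter selects between two serializations of the same tensors: the darknet .weights layout (the conv biases, or the four batch-norm vectors in the order bias, scale, running mean, running variance, stored before the convolution kernel) and the layout the commit adds for YOLOv5 models (convolution kernel first, then the bias, or then batch-norm scale, bias, running mean, running variance). A toy sketch of the two read orders over one flat buffer follows; the filter counts and the "wts" label are illustrative, not taken from the repo (the parser only tests for equality with "weights").

// Toy illustration of the two consumption orders, plain C++ only.
#include <cstdio>
#include <initializer_list>
#include <string>
#include <vector>

int main() {
    const int filters = 2;                 // output channels (made up)
    const int size = filters * 3 * 3 * 3;  // kernel floats: N*C*kH*kW (made up)
    std::vector<float> blob(filters + size, 0.5f);  // dummy weight file
    for (const std::string weightsType : {"weights", "wts"}) {
        int weightPtr = 0;
        if (weightsType == "weights") {  // darknet order
            weightPtr += filters;        // conv biases first...
            weightPtr += size;           // ...then the conv kernel
        } else {                         // YOLOv5 order
            weightPtr += size;           // conv kernel first...
            weightPtr += filters;        // ...then the conv biases
        }
        std::printf("%s: consumed %d floats\n", weightsType.c_str(), weightPtr);
    }
    return 0;
}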


@@ -19,6 +19,7 @@ nvinfer1::ILayer* convolutionalLayer(
     std::vector<float>& weights,
     std::vector<nvinfer1::Weights>& trtWeights,
     int& weightPtr,
+    std::string weightsType,
     int& inputChannels,
     nvinfer1::ITensor* input,
     nvinfer1::INetworkDefinition* network);
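With the declaration changed, every caller of convolutionalLayer now has to pass the extra weightsType argument. The call sites are not shown in this diff, so the following is only a hypothetical sketch of how a loader could derive the argument from the weights-file extension; the helper name and the "wts" return value are placeholders, not code from this commit.

#include <string>

// Hypothetical helper, not from this commit: pick the weightsType argument
// from the file extension. Anything that is not a darknet ".weights" file
// falls through to the new YOLOv5 branch of the parser.
std::string inferWeightsType(const std::string& weightsFilePath) {
    const std::string ext = ".weights";
    if (weightsFilePath.size() >= ext.size() &&
        weightsFilePath.compare(weightsFilePath.size() - ext.size(),
                                ext.size(), ext) == 0) {
        return "weights";
    }
    return "wts";  // placeholder label; any non-"weights" value selects the new path
}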