DeepStream 7.1 + Fixes + New model output format

Marcos Luciano
2024-11-07 11:25:17 -03:00
parent bca9e59d07
commit b451b036b2
75 changed files with 2383 additions and 1113 deletions


@@ -9,58 +9,72 @@
 nvinfer1::ITensor*
 sliceLayer(int layerIdx, std::string& name, nvinfer1::ITensor* input, nvinfer1::Dims start, nvinfer1::Dims size,
-    nvinfer1::Dims stride, nvinfer1::INetworkDefinition* network, uint batchSize)
+    nvinfer1::Dims stride, nvinfer1::INetworkDefinition* network)
 {
   nvinfer1::ITensor* output;
-  int tensorBatch = input->getDimensions().d[0];
-  nvinfer1::ISliceLayer* slice;
-  if (tensorBatch == -1) {
-    slice = network->addSlice(*input, start, nvinfer1::Dims{}, stride);
-    assert(slice != nullptr);
+  nvinfer1::ISliceLayer* slice = network->addSlice(*input, start, size, stride);
+  nvinfer1::Dims inputDims = input->getDimensions();
+  if (inputDims.d[0] == -1) {
     int nbDims = size.nbDims;
-    nvinfer1::Weights constant1Wt {nvinfer1::DataType::kINT32, nullptr, nbDims};
-    int* val1 = new int[nbDims];
-    val1[0] = 1;
-    for (int i = 1; i < nbDims; ++i) {
-      val1[i] = size.d[i];
-    }
-    constant1Wt.values = val1;
-    nvinfer1::IConstantLayer* constant1 = network->addConstant(nvinfer1::Dims{1, {nbDims}}, constant1Wt);
-    assert(constant1 != nullptr);
-    std::string constant1LayerName = "constant1_" + name + "_" + std::to_string(layerIdx);
-    constant1->setName(constant1LayerName.c_str());
-    nvinfer1::ITensor* constant1Tensor = constant1->getOutput(0);
-    nvinfer1::Weights constant2Wt {nvinfer1::DataType::kINT32, nullptr, nbDims};
-    int* val2 = new int[nbDims];
-    val2[0] = batchSize;
-    for (int i = 1; i < nbDims; ++i) {
-      val2[i] = 1;
-    }
-    constant2Wt.values = val2;
-    nvinfer1::IConstantLayer* constant2 = network->addConstant(nvinfer1::Dims{1, {nbDims}}, constant2Wt);
-    assert(constant2 != nullptr);
-    std::string constant2LayerName = "constant2_" + name + "_" + std::to_string(layerIdx);
-    constant2->setName(constant2LayerName.c_str());
-    nvinfer1::ITensor* constant2Tensor = constant2->getOutput(0);
-    nvinfer1::IElementWiseLayer* newSize = network->addElementWise(*constant1Tensor, *constant2Tensor,
-        nvinfer1::ElementWiseOperation::kPROD);
-    assert(newSize != nullptr);
-    std::string newSizeLayerName = "new_size_" + name + "_" + std::to_string(layerIdx);
-    newSize->setName(newSizeLayerName.c_str());
-    nvinfer1::ITensor* newSizeTensor = newSize->getOutput(0);
-    slice->setInput(2, *newSizeTensor);
+    nvinfer1::IShapeLayer* shape = network->addShape(*input);
+    assert(shape != nullptr);
+    std::string shapeLayerName = "shape_" + name + "_" + std::to_string(layerIdx);
+    shape->setName(shapeLayerName.c_str());
+    nvinfer1::ITensor* shapeTensor = shape->getOutput(0);
+    assert(shapeTensor != nullptr);
+#if NV_TENSORRT_MAJOR >= 10
+    nvinfer1::ICastLayer* castShape = network->addCast(*shapeTensor, nvinfer1::DataType::kINT32);
+    assert(castShape != nullptr);
+    std::string castShapeLayerName = "cast_shape_" + name + "_" + std::to_string(layerIdx);
+    castShape->setName(castShapeLayerName.c_str());
+    nvinfer1::ITensor* castShapeTensor = castShape->getOutput(0);
+    assert(castShapeTensor != nullptr);
+    shapeTensor = castShapeTensor;
+#endif
+    nvinfer1::Weights constantWt {nvinfer1::DataType::kINT32, nullptr, nbDims};
+    int* val = new int[nbDims];
+    for (int i = 0; i < nbDims; ++i) {
+      if (inputDims.d[i] == size.d[i]) {
+        val[i] = 0;
+      }
+      else {
+        val[i] = inputDims.d[i] - size.d[i];
+      }
+    }
+    constantWt.values = val;
+    nvinfer1::IConstantLayer* constant = network->addConstant(nvinfer1::Dims{1, {nbDims}}, constantWt);
+    assert(constant != nullptr);
+    std::string constantLayerName = "constant_" + name + "_" + std::to_string(layerIdx);
+    constant->setName(constantLayerName.c_str());
+    nvinfer1::ITensor* constantTensor = constant->getOutput(0);
+    assert(constantTensor != nullptr);
+    nvinfer1::IElementWiseLayer* divide = network->addElementWise(*shapeTensor, *constantTensor,
+        nvinfer1::ElementWiseOperation::kSUB);
+    assert(divide != nullptr);
+    std::string divideLayerName = "divide_" + name + "_" + std::to_string(layerIdx);
+    divide->setName(divideLayerName.c_str());
+    nvinfer1::ITensor* divideTensor = divide->getOutput(0);
+    assert(divideTensor != nullptr);
+    slice->setInput(2, *divideTensor);
   }
-  else {
-    slice = network->addSlice(*input, start, size, stride);
-    assert(slice != nullptr);
-  }
+  assert(slice != nullptr);
   std::string sliceLayerName = name + "_" + std::to_string(layerIdx);
   slice->setName(sliceLayerName.c_str());
   output = slice->getOutput(0);
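
For reference, the technique the new code relies on is TensorRT's dynamic Slice: when the input has a -1 (runtime) dimension, the static size argument of addSlice cannot describe the output, so a size tensor computed in the network (Shape, optionally Cast, then an element-wise Sub against the per-dimension trim amounts) is wired into the layer's third input via setInput(2, ...). Below is a minimal, self-contained sketch of that pattern, not the commit's code: the function name dynamicSlice and the trim/trimWt/sizeTensor identifiers are illustrative, and the addCast call only compiles on TensorRT 10+, where it exists, because of the #if guard.

#include <NvInfer.h>

#include <cassert>
#include <cstdint>

// Sketch: slice `input` so every static dimension keeps size.d[i] elements
// while dynamic (-1) dimensions follow the runtime extent of the input.
static nvinfer1::ITensor*
dynamicSlice(nvinfer1::INetworkDefinition* network, nvinfer1::ITensor* input,
    nvinfer1::Dims start, nvinfer1::Dims size, nvinfer1::Dims stride)
{
  // The static `size` acts as a placeholder; setInput(2, ...) overrides it below.
  nvinfer1::ISliceLayer* slice = network->addSlice(*input, start, size, stride);
  assert(slice != nullptr);

  nvinfer1::Dims inputDims = input->getDimensions();
  if (inputDims.d[0] == -1) {
    int nbDims = size.nbDims;

    // Runtime shape of the input, e.g. [N, C, H, W] with N known only at runtime.
    nvinfer1::ITensor* shapeTensor = network->addShape(*input)->getOutput(0);

#if NV_TENSORRT_MAJOR >= 10
    // TensorRT 10 emits INT64 shape tensors; cast to INT32 so the element-wise
    // subtraction below gets operands of the same type.
    shapeTensor = network->addCast(*shapeTensor, nvinfer1::DataType::kINT32)->getOutput(0);
#endif

    // Per-dimension trim amount: 0 where the slice keeps the whole (possibly
    // dynamic) extent, inputDims[i] - size[i] otherwise. Allocated with new,
    // as in the commit, because TensorRT reads weights during engine build.
    int32_t* trim = new int32_t[nbDims];
    for (int i = 0; i < nbDims; ++i) {
      trim[i] = (inputDims.d[i] == size.d[i]) ? 0 : inputDims.d[i] - size.d[i];
    }
    nvinfer1::Weights trimWt {nvinfer1::DataType::kINT32, trim, nbDims};
    nvinfer1::ITensor* trimTensor =
        network->addConstant(nvinfer1::Dims{1, {nbDims}}, trimWt)->getOutput(0);

    // Runtime size = shape(input) - trim, fed into Slice input 2 (the size input).
    nvinfer1::ITensor* sizeTensor = network->addElementWise(*shapeTensor,
        *trimTensor, nvinfer1::ElementWiseOperation::kSUB)->getOutput(0);
    slice->setInput(2, *sizeTensor);
  }
  return slice->getOutput(0);
}

Because the slice size is now derived entirely from the input's runtime shape (Shape minus a static trim) instead of multiplying a per-dimension size constant by a [batchSize, 1, ...] constant as before, the uint batchSize parameter could be dropped from the signature, which is what the hunk above does.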