Minor fixes
@@ -11,8 +11,9 @@
 inline __device__ float sigmoidGPU(const float& x) { return 1.0f / (1.0f + __expf(-x)); }
 
-__device__ void softmaxGPU(const float* input, const int bbindex, const int numGridCells,
-    uint z_id, const uint numOutputClasses, float temp, float* output)
+__device__ void softmaxGPU(
+    const float* input, const int bbindex, const int numGridCells, uint z_id,
+    const uint numOutputClasses, float temp, float* output)
 {
     int i;
     float sum = 0;
@@ -31,8 +32,10 @@ __device__ void softmaxGPU(const float* input, const int bbindex, const int numG
     }
 }
 
-__global__ void gpuRegionLayer(const float* input, float* output, float* softmax, const uint gridSizeX, const uint gridSizeY, const uint numOutputClasses,
-    const uint numBBoxes, const float* anchors)
+__global__ void gpuRegionLayer(
+    const float* input, float* output, float* softmax, const uint netWidth, const uint netHeight,
+    const uint gridSizeX, const uint gridSizeY, const uint numOutputClasses, const uint numBBoxes,
+    const float* anchors)
 {
     uint x_id = blockIdx.x * blockDim.x + threadIdx.x;
     uint y_id = blockIdx.y * blockDim.y + threadIdx.y;
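
For context (not part of the commit): each thread of gpuRegionLayer handles one grid cell (x_id, y_id) and one box z_id, and the tensor is stored plane by plane, so element k of a box lives at bbindex + numGridCells * (z_id * (5 + numOutputClasses) + k). A minimal sketch of that indexing, assuming numGridCells is gridSizeX * gridSizeY; regionOffset is a hypothetical helper, not code from this repository:

// Sketch only. k selects the element within a box:
// 0..3 = x, y, w, h, 4 = objectness, 5.. = class scores.
__host__ __device__ inline int regionOffset(int x_id, int y_id, int z_id, int k,
                                            int gridSizeX, int gridSizeY, int numOutputClasses)
{
    const int numGridCells = gridSizeX * gridSizeY;  // elements per channel plane (assumed)
    const int bbindex = y_id * gridSizeX + x_id;     // cell index inside a plane
    return bbindex + numGridCells * (z_id * (5 + numOutputClasses) + k);
}
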
@@ -47,16 +50,20 @@ __global__ void gpuRegionLayer(const float* input, float* output, float* softmax
         const int bbindex = y_id * gridSizeX + x_id;
 
         output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]
-            = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]) + x_id;
+            = (sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)])
+                  + x_id) * netWidth / gridSizeX;
 
         output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]
-            = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]) + y_id;
+            = (sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)])
+                  + y_id) * netHeight / gridSizeY;
 
         output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]
-            = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]) * anchors[z_id * 2];
+            = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)])
+                  * anchors[z_id * 2] * netWidth / gridSizeX;
 
         output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]
-            = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]) * anchors[z_id * 2 + 1];
+            = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)])
+                  * anchors[z_id * 2 + 1] * netHeight / gridSizeY;
 
         softmaxGPU(input, bbindex, numGridCells, z_id, numOutputClasses, 1.0, softmax);
 
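
The substance of this hunk is that boxes are no longer left in grid units: the sigmoid-decoded centre offsets and the anchor-scaled exponentials are now multiplied by netWidth / gridSizeX and netHeight / gridSizeY, so the kernel writes coordinates in network-input pixels. A CPU sketch of the same decode (not part of the commit; tx, ty, tw, th for the raw predictions, cx, cy for the grid cell, aw, ah for the anchor are hypothetical names):

#include <cmath>

struct Box { float x, y, w, h; };

// Mirrors the updated GPU decode: grid-relative values scaled to input pixels.
Box decodeRegionBox(float tx, float ty, float tw, float th,
                    int cx, int cy, float aw, float ah,
                    int netWidth, int netHeight, int gridSizeX, int gridSizeY)
{
    const float sx = 1.0f / (1.0f + std::exp(-tx));
    const float sy = 1.0f / (1.0f + std::exp(-ty));
    Box b;
    b.x = (sx + cx) * netWidth  / gridSizeX;          // box centre x in input pixels
    b.y = (sy + cy) * netHeight / gridSizeY;          // box centre y in input pixels
    b.w = std::exp(tw) * aw * netWidth  / gridSizeX;  // anchor width, grid units -> pixels
    b.h = std::exp(th) * ah * netHeight / gridSizeY;  // anchor height, grid units -> pixels
    return b;
}
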
@@ -85,25 +92,29 @@ __global__ void gpuRegionLayer(const float* input, float* output, float* softmax
             = maxIndex;
 }
 
-cudaError_t cudaYoloLayer_v2(const void* input, void* output, void* softmax, const uint& batchSize, const uint& gridSizeX, const uint& gridSizeY,
-    const uint& numOutputClasses, const uint& numBBoxes, uint64_t outputSize, cudaStream_t stream,
-    const void* anchors);
+cudaError_t cudaRegionLayer(
+    const void* input, void* output, void* softmax, const uint& batchSize, const uint& netWidth,
+    const uint& netHeight, const uint& gridSizeX, const uint& gridSizeY, const uint& numOutputClasses,
+    const uint& numBBoxes, uint64_t& outputSize, const void* anchors, cudaStream_t stream);
 
-cudaError_t cudaYoloLayer_v2(const void* input, void* output, void* softmax, const uint& batchSize, const uint& gridSizeX, const uint& gridSizeY,
-    const uint& numOutputClasses, const uint& numBBoxes, uint64_t outputSize, cudaStream_t stream,
-    const void* anchors)
+cudaError_t cudaRegionLayer(
+    const void* input, void* output, void* softmax, const uint& batchSize, const uint& netWidth,
+    const uint& netHeight, const uint& gridSizeX, const uint& gridSizeY, const uint& numOutputClasses,
+    const uint& numBBoxes, uint64_t& outputSize, const void* anchors, cudaStream_t stream)
 {
     dim3 threads_per_block(16, 16, 4);
     dim3 number_of_blocks((gridSizeX / threads_per_block.x) + 1,
         (gridSizeY / threads_per_block.y) + 1,
         (numBBoxes / threads_per_block.z) + 1);
 
     for (unsigned int batch = 0; batch < batchSize; ++batch)
     {
         gpuRegionLayer<<<number_of_blocks, threads_per_block, 0, stream>>>(
             reinterpret_cast<const float*>(input) + (batch * outputSize),
-            reinterpret_cast<float*>(output) + (batch * outputSize),
-            reinterpret_cast<float*>(softmax) + (batch * outputSize), gridSizeX, gridSizeY, numOutputClasses,
-            numBBoxes, reinterpret_cast<const float*>(anchors));
+            reinterpret_cast<float*>(output) + (batch * outputSize),
+            reinterpret_cast<float*>(softmax) + (batch * outputSize),
+            netWidth, netHeight, gridSizeX, gridSizeY, numOutputClasses, numBBoxes,
+            reinterpret_cast<const float*>(anchors));
     }
     return cudaGetLastError();
 }
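
A hedged sketch of how the renamed host wrapper might be called (not from this repository; buffer names and sizes are illustrative, and outputSize is assumed to be the per-batch element count, i.e. gridSizeX * gridSizeY * numBBoxes * (5 + numOutputClasses), consistent with the batch * outputSize pointer arithmetic above):

// Hypothetical call site; d_input, d_output, d_softmax, d_anchors are device buffers
// allocated elsewhere, and stream is an existing cudaStream_t.
uint batchSize = 1, netWidth = 416, netHeight = 416;
uint gridSizeX = 13, gridSizeY = 13, numOutputClasses = 20, numBBoxes = 5;
uint64_t outputSize = gridSizeX * gridSizeY * numBBoxes * (5 + numOutputClasses);

cudaError_t status = cudaRegionLayer(
    d_input, d_output, d_softmax, batchSize, netWidth, netHeight,
    gridSizeX, gridSizeY, numOutputClasses, numBBoxes, outputSize,
    d_anchors, stream);
if (status != cudaSuccess)
    printf("region layer launch failed: %s\n", cudaGetErrorString(status));
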