success
28
modules/module_lib/pointnet2_utils/pointnet2/src/ball_query.cpp
Executable file
@@ -0,0 +1,28 @@
#include <torch/serialize/tensor.h>
#include <vector>
// #include <THC/THC.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "ball_query_gpu.h"
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAEvent.h>

// extern THCState *state;

#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x, " must be a CUDA tensor ")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample,
    at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor) {
    CHECK_INPUT(new_xyz_tensor);
    CHECK_INPUT(xyz_tensor);
    const float *new_xyz = new_xyz_tensor.data<float>();
    const float *xyz = xyz_tensor.data<float>();
    int *idx = idx_tensor.data<int>();

    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    ball_query_kernel_launcher_fast(b, n, m, radius, nsample, new_xyz, xyz, idx, stream);
    return 1;
}
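Note: a minimal host-side sketch of calling this wrapper from C++, assuming the files in this commit are compiled and linked against libtorch with CUDA available; the sizes and the `main` harness below are illustrative, not part of the commit:

#include <torch/torch.h>
#include "ball_query_gpu.h"

int main() {
    const int b = 2, n = 1024, m = 128, nsample = 32;
    auto opts = torch::dtype(torch::kFloat32).device(torch::kCUDA);
    at::Tensor xyz = torch::rand({b, n, 3}, opts);      // source cloud
    at::Tensor new_xyz = torch::rand({b, m, 3}, opts);  // query centers
    at::Tensor idx = torch::zeros({b, m, nsample}, opts.dtype(torch::kInt32));
    ball_query_wrapper_fast(b, n, m, 0.4f, nsample, new_xyz, xyz, idx);
    // idx now holds, per query center, up to nsample neighbor indices within the radius
    return 0;
}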
67
modules/module_lib/pointnet2_utils/pointnet2/src/ball_query_gpu.cu
Executable file
@@ -0,0 +1,67 @@
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include "ball_query_gpu.h"
#include "cuda_utils.h"


__global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample,
    const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) {
    // new_xyz: (B, M, 3)
    // xyz: (B, N, 3)
    // output:
    //      idx: (B, M, nsample)
    int bs_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || pt_idx >= m) return;

    new_xyz += bs_idx * m * 3 + pt_idx * 3;
    xyz += bs_idx * n * 3;
    idx += bs_idx * m * nsample + pt_idx * nsample;

    float radius2 = radius * radius;
    float new_x = new_xyz[0];
    float new_y = new_xyz[1];
    float new_z = new_xyz[2];

    int cnt = 0;
    for (int k = 0; k < n; ++k) {
        float x = xyz[k * 3 + 0];
        float y = xyz[k * 3 + 1];
        float z = xyz[k * 3 + 2];
        float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z);
        if (d2 < radius2) {
            if (cnt == 0) {
                // pad every slot with the first neighbor found, so slots past
                // cnt never hold uninitialized indices
                for (int l = 0; l < nsample; ++l) {
                    idx[l] = k;
                }
            }
            idx[cnt] = k;
            ++cnt;
            if (cnt >= nsample) break;
        }
    }
}


void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample,
    const float *new_xyz, const float *xyz, int *idx, cudaStream_t stream) {
    // new_xyz: (B, M, 3)
    // xyz: (B, N, 3)
    // output:
    //      idx: (B, M, nsample)

    cudaError_t err;

    dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    ball_query_kernel_fast<<<blocks, threads, 0, stream>>>(b, n, m, radius, nsample, new_xyz, xyz, idx);
    // cudaDeviceSynchronize();  // for using printf in kernel function
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
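The padding behavior above (every slot beyond cnt keeps the first hit) is easiest to see in a serial reference for a single (batch, query) pair; a sketch, not part of the commit:

// Serial reference mirroring ball_query_kernel_fast for one query center.
void ball_query_reference(int n, float radius, int nsample,
        const float *new_xyz, const float *xyz, int *idx) {
    float radius2 = radius * radius;
    int cnt = 0;
    for (int k = 0; k < n && cnt < nsample; ++k) {
        float dx = new_xyz[0] - xyz[k * 3 + 0];
        float dy = new_xyz[1] - xyz[k * 3 + 1];
        float dz = new_xyz[2] - xyz[k * 3 + 2];
        if (dx * dx + dy * dy + dz * dz < radius2) {
            if (cnt == 0)  // pre-fill all slots so none stay uninitialized
                for (int l = 0; l < nsample; ++l) idx[l] = k;
            idx[cnt++] = k;
        }
    }
}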
15
modules/module_lib/pointnet2_utils/pointnet2/src/ball_query_gpu.h
Executable file
@@ -0,0 +1,15 @@
#ifndef _BALL_QUERY_GPU_H
#define _BALL_QUERY_GPU_H

#include <torch/serialize/tensor.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime_api.h>

int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample,
    at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor);

void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample,
    const float *new_xyz, const float *xyz, int *idx, cudaStream_t stream);

#endif
15
modules/module_lib/pointnet2_utils/pointnet2/src/cuda_utils.h
Executable file
@@ -0,0 +1,15 @@
#ifndef _CUDA_UTILS_H
#define _CUDA_UTILS_H

#include <cmath>

#define TOTAL_THREADS 1024
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))

// Largest power of two <= work_size, clamped to [1, TOTAL_THREADS].
inline int opt_n_threads(int work_size) {
    const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);

    return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
#endif
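Worked values for the two helpers above (illustrative only, not part of the commit):

// DIVUP rounds up: 4 blocks of 256 threads cover 1000 work items.
static_assert(DIVUP(1000, 256) == 4, "DIVUP rounds up");
static_assert(DIVUP(512, 256) == 2, "exact multiples add no extra block");
// opt_n_threads rounds *down* to a power of two, clamped to [1, 1024]:
//   opt_n_threads(700) == 512, opt_n_threads(65) == 64, opt_n_threads(1) == 1.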
37
modules/module_lib/pointnet2_utils/pointnet2/src/group_points.cpp
Executable file
@@ -0,0 +1,37 @@
#include <torch/serialize/tensor.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>
// #include <THC/THC.h>
#include "group_points_gpu.h"
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAEvent.h>
// extern THCState *state;


int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample,
    at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) {

    float *grad_points = grad_points_tensor.data<float>();
    const int *idx = idx_tensor.data<int>();
    const float *grad_out = grad_out_tensor.data<float>();

    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    group_points_grad_kernel_launcher_fast(b, c, n, npoints, nsample, grad_out, idx, grad_points, stream);
    return 1;
}


int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample,
    at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) {

    const float *points = points_tensor.data<float>();
    const int *idx = idx_tensor.data<int>();
    float *out = out_tensor.data<float>();

    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    group_points_kernel_launcher_fast(b, c, n, npoints, nsample, points, idx, out, stream);
    return 1;
}
86
modules/module_lib/pointnet2_utils/pointnet2/src/group_points_gpu.cu
Executable file
@@ -0,0 +1,86 @@
#include <stdio.h>
#include <stdlib.h>

#include "cuda_utils.h"
#include "group_points_gpu.h"


__global__ void group_points_grad_kernel_fast(int b, int c, int n, int npoints, int nsample,
    const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) {
    // grad_out: (B, C, npoints, nsample)
    // idx: (B, npoints, nsample)
    // output:
    //      grad_points: (B, C, N)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int pt_idx = index / nsample;
    if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;

    int sample_idx = index % nsample;
    grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx;
    idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;

    // several samples may reference the same source point, so scatter atomically
    atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0], grad_out[0]);
}

void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
    const float *grad_out, const int *idx, float *grad_points, cudaStream_t stream) {
    // grad_out: (B, C, npoints, nsample)
    // idx: (B, npoints, nsample)
    // output:
    //      grad_points: (B, C, N)
    cudaError_t err;
    dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    group_points_grad_kernel_fast<<<blocks, threads, 0, stream>>>(b, c, n, npoints, nsample, grad_out, idx, grad_points);

    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}


__global__ void group_points_kernel_fast(int b, int c, int n, int npoints, int nsample,
    const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) {
    // points: (B, C, N)
    // idx: (B, npoints, nsample)
    // output:
    //      out: (B, C, npoints, nsample)
    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int pt_idx = index / nsample;
    if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;

    int sample_idx = index % nsample;

    idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
    int in_idx = bs_idx * c * n + c_idx * n + idx[0];
    int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx;

    out[out_idx] = points[in_idx];
}


void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
    const float *points, const int *idx, float *out, cudaStream_t stream) {
    // points: (B, C, N)
    // idx: (B, npoints, nsample)
    // output:
    //      out: (B, C, npoints, nsample)
    cudaError_t err;
    dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    group_points_kernel_fast<<<blocks, threads, 0, stream>>>(b, c, n, npoints, nsample, points, idx, out);
    // cudaDeviceSynchronize();  // for using printf in kernel function
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
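The index arithmetic in the forward kernel is equivalent to this serial gather (a reference sketch, not part of the commit):

// out[b][c][p][s] = points[b][c][ idx[b][p][s] ]
void group_points_reference(int b, int c, int n, int npoints, int nsample,
        const float *points, const int *idx, float *out) {
    for (int bi = 0; bi < b; ++bi)
        for (int ci = 0; ci < c; ++ci)
            for (int p = 0; p < npoints; ++p)
                for (int s = 0; s < nsample; ++s)
                    out[((bi * c + ci) * npoints + p) * nsample + s] =
                        points[(bi * c + ci) * n + idx[(bi * npoints + p) * nsample + s]];
}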
22
modules/module_lib/pointnet2_utils/pointnet2/src/group_points_gpu.h
Executable file
@@ -0,0 +1,22 @@
#ifndef _GROUP_POINTS_GPU_H
#define _GROUP_POINTS_GPU_H

#include <torch/serialize/tensor.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <vector>


int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample,
    at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor);

void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
    const float *points, const int *idx, float *out, cudaStream_t stream);

int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample,
    at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor);

void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
    const float *grad_out, const int *idx, float *grad_points, cudaStream_t stream);

#endif
59
modules/module_lib/pointnet2_utils/pointnet2/src/interpolate.cpp
Executable file
@@ -0,0 +1,59 @@
#include <torch/serialize/tensor.h>
#include <vector>
// #include <THC/THC.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAEvent.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "interpolate_gpu.h"

// extern THCState *state;


void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor,
    at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor) {
    const float *unknown = unknown_tensor.data<float>();
    const float *known = known_tensor.data<float>();
    float *dist2 = dist2_tensor.data<float>();
    int *idx = idx_tensor.data<int>();

    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    three_nn_kernel_launcher_fast(b, n, m, unknown, known, dist2, idx, stream);
}


void three_interpolate_wrapper_fast(int b, int c, int m, int n,
    at::Tensor points_tensor,
    at::Tensor idx_tensor,
    at::Tensor weight_tensor,
    at::Tensor out_tensor) {

    const float *points = points_tensor.data<float>();
    const float *weight = weight_tensor.data<float>();
    float *out = out_tensor.data<float>();
    const int *idx = idx_tensor.data<int>();

    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    three_interpolate_kernel_launcher_fast(b, c, m, n, points, idx, weight, out, stream);
}

void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m,
    at::Tensor grad_out_tensor,
    at::Tensor idx_tensor,
    at::Tensor weight_tensor,
    at::Tensor grad_points_tensor) {

    const float *grad_out = grad_out_tensor.data<float>();
    const float *weight = weight_tensor.data<float>();
    float *grad_points = grad_points_tensor.data<float>();
    const int *idx = idx_tensor.data<int>();

    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    three_interpolate_grad_kernel_launcher_fast(b, c, n, m, grad_out, idx, weight, grad_points, stream);
}
161
modules/module_lib/pointnet2_utils/pointnet2/src/interpolate_gpu.cu
Executable file
@@ -0,0 +1,161 @@
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include "cuda_utils.h"
#include "interpolate_gpu.h"


__global__ void three_nn_kernel_fast(int b, int n, int m, const float *__restrict__ unknown,
    const float *__restrict__ known, float *__restrict__ dist2, int *__restrict__ idx) {
    // unknown: (B, N, 3)
    // known: (B, M, 3)
    // output:
    //      dist2: (B, N, 3)
    //      idx: (B, N, 3)

    int bs_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || pt_idx >= n) return;

    unknown += bs_idx * n * 3 + pt_idx * 3;
    known += bs_idx * m * 3;
    dist2 += bs_idx * n * 3 + pt_idx * 3;
    idx += bs_idx * n * 3 + pt_idx * 3;

    float ux = unknown[0];
    float uy = unknown[1];
    float uz = unknown[2];

    // single pass over the known points, keeping the three smallest squared distances
    double best1 = 1e40, best2 = 1e40, best3 = 1e40;
    int besti1 = 0, besti2 = 0, besti3 = 0;
    for (int k = 0; k < m; ++k) {
        float x = known[k * 3 + 0];
        float y = known[k * 3 + 1];
        float z = known[k * 3 + 2];
        float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
        if (d < best1) {
            best3 = best2; besti3 = besti2;
            best2 = best1; besti2 = besti1;
            best1 = d; besti1 = k;
        }
        else if (d < best2) {
            best3 = best2; besti3 = besti2;
            best2 = d; besti2 = k;
        }
        else if (d < best3) {
            best3 = d; besti3 = k;
        }
    }
    dist2[0] = best1; dist2[1] = best2; dist2[2] = best3;
    idx[0] = besti1; idx[1] = besti2; idx[2] = besti3;
}


void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown,
    const float *known, float *dist2, int *idx, cudaStream_t stream) {
    // unknown: (B, N, 3)
    // known: (B, M, 3)
    // output:
    //      dist2: (B, N, 3)
    //      idx: (B, N, 3)

    cudaError_t err;
    dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    three_nn_kernel_fast<<<blocks, threads, 0, stream>>>(b, n, m, unknown, known, dist2, idx);

    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}


__global__ void three_interpolate_kernel_fast(int b, int c, int m, int n, const float *__restrict__ points,
    const int *__restrict__ idx, const float *__restrict__ weight, float *__restrict__ out) {
    // points: (B, C, M)
    // idx: (B, N, 3)
    // weight: (B, N, 3)
    // output:
    //      out: (B, C, N)

    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;

    weight += bs_idx * n * 3 + pt_idx * 3;
    points += bs_idx * c * m + c_idx * m;
    idx += bs_idx * n * 3 + pt_idx * 3;
    out += bs_idx * c * n + c_idx * n;

    out[pt_idx] = weight[0] * points[idx[0]] + weight[1] * points[idx[1]] + weight[2] * points[idx[2]];
}

void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n,
    const float *points, const int *idx, const float *weight, float *out, cudaStream_t stream) {
    // points: (B, C, M)
    // idx: (B, N, 3)
    // weight: (B, N, 3)
    // output:
    //      out: (B, C, N)

    cudaError_t err;
    dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);
    three_interpolate_kernel_fast<<<blocks, threads, 0, stream>>>(b, c, m, n, points, idx, weight, out);

    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}


__global__ void three_interpolate_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out,
    const int *__restrict__ idx, const float *__restrict__ weight, float *__restrict__ grad_points) {
    // grad_out: (B, C, N)
    // weight: (B, N, 3)
    // output:
    //      grad_points: (B, C, M)

    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;

    grad_out += bs_idx * c * n + c_idx * n + pt_idx;
    weight += bs_idx * n * 3 + pt_idx * 3;
    grad_points += bs_idx * c * m + c_idx * m;
    idx += bs_idx * n * 3 + pt_idx * 3;

    // several outputs can share a source point, so accumulate atomically
    atomicAdd(grad_points + idx[0], grad_out[0] * weight[0]);
    atomicAdd(grad_points + idx[1], grad_out[0] * weight[1]);
    atomicAdd(grad_points + idx[2], grad_out[0] * weight[2]);
}

void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out,
    const int *idx, const float *weight, float *grad_points, cudaStream_t stream) {
    // grad_out: (B, C, N)
    // weight: (B, N, 3)
    // output:
    //      grad_points: (B, C, M)

    cudaError_t err;
    dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);
    three_interpolate_grad_kernel_fast<<<blocks, threads, 0, stream>>>(b, c, n, m, grad_out, idx, weight, grad_points);

    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
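Per output point, the forward pass is just a three-term weighted sum over the nearest known points; a serial sketch of the same computation (not part of the commit):

// out[b][c][i] = sum_{j<3} weight[b][i][j] * points[b][c][ idx[b][i][j] ]
void three_interpolate_reference(int b, int c, int m, int n,
        const float *points, const int *idx, const float *weight, float *out) {
    for (int bi = 0; bi < b; ++bi)
        for (int ci = 0; ci < c; ++ci)
            for (int i = 0; i < n; ++i) {
                const int   *ix = idx    + (bi * n + i) * 3;
                const float *w  = weight + (bi * n + i) * 3;
                const float *p  = points + (bi * c + ci) * m;
                out[(bi * c + ci) * n + i] = w[0] * p[ix[0]] + w[1] * p[ix[1]] + w[2] * p[ix[2]];
            }
}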
30
modules/module_lib/pointnet2_utils/pointnet2/src/interpolate_gpu.h
Executable file
@@ -0,0 +1,30 @@
#ifndef _INTERPOLATE_GPU_H
#define _INTERPOLATE_GPU_H

#include <torch/serialize/tensor.h>
#include <vector>
#include <cuda.h>
#include <cuda_runtime_api.h>


void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor,
    at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor);

void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown,
    const float *known, float *dist2, int *idx, cudaStream_t stream);


void three_interpolate_wrapper_fast(int b, int c, int m, int n, at::Tensor points_tensor,
    at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor);

void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n,
    const float *points, const int *idx, const float *weight, float *out, cudaStream_t stream);


void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, at::Tensor grad_out_tensor,
    at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor grad_points_tensor);

void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out,
    const int *idx, const float *weight, float *grad_points, cudaStream_t stream);

#endif
24
modules/module_lib/pointnet2_utils/pointnet2/src/pointnet2_api.cpp
Executable file
@@ -0,0 +1,24 @@
#include <torch/serialize/tensor.h>
#include <torch/extension.h>

#include "ball_query_gpu.h"
#include "group_points_gpu.h"
#include "sampling_gpu.h"
#include "interpolate_gpu.h"


PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("ball_query_wrapper", &ball_query_wrapper_fast, "ball_query_wrapper_fast");

    m.def("group_points_wrapper", &group_points_wrapper_fast, "group_points_wrapper_fast");
    m.def("group_points_grad_wrapper", &group_points_grad_wrapper_fast, "group_points_grad_wrapper_fast");

    m.def("gather_points_wrapper", &gather_points_wrapper_fast, "gather_points_wrapper_fast");
    m.def("gather_points_grad_wrapper", &gather_points_grad_wrapper_fast, "gather_points_grad_wrapper_fast");

    m.def("furthest_point_sampling_wrapper", &furthest_point_sampling_wrapper, "furthest_point_sampling_wrapper");

    m.def("three_nn_wrapper", &three_nn_wrapper_fast, "three_nn_wrapper_fast");
    m.def("three_interpolate_wrapper", &three_interpolate_wrapper_fast, "three_interpolate_wrapper_fast");
    m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_fast, "three_interpolate_grad_wrapper_fast");
}
51
modules/module_lib/pointnet2_utils/pointnet2/src/sampling.cpp
Executable file
@@ -0,0 +1,51 @@
#include <torch/serialize/tensor.h>
#include <ATen/cuda/CUDAContext.h>
#include <vector>
// #include <THC/THC.h>

#include "sampling_gpu.h"
#include <ATen/cuda/CUDAEvent.h>

// extern THCState *state;


int gather_points_wrapper_fast(int b, int c, int n, int npoints,
    at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) {
    const float *points = points_tensor.data<float>();
    const int *idx = idx_tensor.data<int>();
    float *out = out_tensor.data<float>();

    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    gather_points_kernel_launcher_fast(b, c, n, npoints, points, idx, out, stream);
    return 1;
}


int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints,
    at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) {

    const float *grad_out = grad_out_tensor.data<float>();
    const int *idx = idx_tensor.data<int>();
    float *grad_points = grad_points_tensor.data<float>();

    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    gather_points_grad_kernel_launcher_fast(b, c, n, npoints, grad_out, idx, grad_points, stream);
    return 1;
}


int furthest_point_sampling_wrapper(int b, int n, int m,
    at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) {

    const float *points = points_tensor.data<float>();
    float *temp = temp_tensor.data<float>();
    int *idx = idx_tensor.data<int>();

    cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    furthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx, stream);
    return 1;
}
253
modules/module_lib/pointnet2_utils/pointnet2/src/sampling_gpu.cu
Executable file
@@ -0,0 +1,253 @@
#include <stdio.h>
#include <stdlib.h>

#include "cuda_utils.h"
#include "sampling_gpu.h"


__global__ void gather_points_kernel_fast(int b, int c, int n, int m,
    const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) {
    // points: (B, C, N)
    // idx: (B, M)
    // output:
    //      out: (B, C, M)

    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;

    out += bs_idx * c * m + c_idx * m + pt_idx;
    idx += bs_idx * m + pt_idx;
    points += bs_idx * c * n + c_idx * n;
    out[0] = points[idx[0]];
}

void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints,
    const float *points, const int *idx, float *out, cudaStream_t stream) {
    // points: (B, C, N)
    // idx: (B, npoints)
    // output:
    //      out: (B, C, npoints)

    cudaError_t err;
    dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    gather_points_kernel_fast<<<blocks, threads, 0, stream>>>(b, c, n, npoints, points, idx, out);

    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}

__global__ void gather_points_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out,
    const int *__restrict__ idx, float *__restrict__ grad_points) {
    // grad_out: (B, C, M)
    // idx: (B, M)
    // output:
    //      grad_points: (B, C, N)

    int bs_idx = blockIdx.z;
    int c_idx = blockIdx.y;
    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (bs_idx >= b || c_idx >= c || pt_idx >= m) return;

    grad_out += bs_idx * c * m + c_idx * m + pt_idx;
    idx += bs_idx * m + pt_idx;
    grad_points += bs_idx * c * n + c_idx * n;

    atomicAdd(grad_points + idx[0], grad_out[0]);
}

void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints,
    const float *grad_out, const int *idx, float *grad_points, cudaStream_t stream) {
    // grad_out: (B, C, npoints)
    // idx: (B, npoints)
    // output:
    //      grad_points: (B, C, N)

    cudaError_t err;
    dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b);  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);

    gather_points_grad_kernel_fast<<<blocks, threads, 0, stream>>>(b, c, n, npoints, grad_out, idx, grad_points);

    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}


// Merge slot idx2 into slot idx1, keeping the larger distance and its point index.
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2) {
    const float v1 = dists[idx1], v2 = dists[idx2];
    const int i1 = dists_i[idx1], i2 = dists_i[idx2];
    dists[idx1] = max(v1, v2);
    dists_i[idx1] = v2 > v1 ? i2 : i1;
}

template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(int b, int n, int m,
    const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) {
    // dataset: (B, N, 3)
    // temp: (B, N)
    // output:
    //      idxs: (B, M)

    if (m <= 0) return;
    __shared__ float dists[block_size];
    __shared__ int dists_i[block_size];

    int batch_index = blockIdx.x;
    dataset += batch_index * n * 3;
    temp += batch_index * n;
    idxs += batch_index * m;

    int tid = threadIdx.x;
    const int stride = block_size;

    int old = 0;
    if (threadIdx.x == 0)
        idxs[0] = old;

    __syncthreads();
    for (int j = 1; j < m; j++) {
        int besti = 0;
        float best = -1;
        float x1 = dataset[old * 3 + 0];
        float y1 = dataset[old * 3 + 1];
        float z1 = dataset[old * 3 + 2];
        // each thread scans a strided slice of the points, tracking the one
        // farthest from the already-selected set (temp caches that distance)
        for (int k = tid; k < n; k += stride) {
            float x2, y2, z2;
            x2 = dataset[k * 3 + 0];
            y2 = dataset[k * 3 + 1];
            z2 = dataset[k * 3 + 2];
            // float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
            // if (mag <= 1e-3)
            //     continue;

            float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
            float d2 = min(d, temp[k]);
            temp[k] = d2;
            besti = d2 > best ? k : besti;
            best = d2 > best ? d2 : best;
        }
        dists[tid] = best;
        dists_i[tid] = besti;
        __syncthreads();

        // unrolled shared-memory max-reduction over the per-thread candidates
        if (block_size >= 1024) {
            if (tid < 512) {
                __update(dists, dists_i, tid, tid + 512);
            }
            __syncthreads();
        }

        if (block_size >= 512) {
            if (tid < 256) {
                __update(dists, dists_i, tid, tid + 256);
            }
            __syncthreads();
        }
        if (block_size >= 256) {
            if (tid < 128) {
                __update(dists, dists_i, tid, tid + 128);
            }
            __syncthreads();
        }
        if (block_size >= 128) {
            if (tid < 64) {
                __update(dists, dists_i, tid, tid + 64);
            }
            __syncthreads();
        }
        if (block_size >= 64) {
            if (tid < 32) {
                __update(dists, dists_i, tid, tid + 32);
            }
            __syncthreads();
        }
        if (block_size >= 32) {
            if (tid < 16) {
                __update(dists, dists_i, tid, tid + 16);
            }
            __syncthreads();
        }
        if (block_size >= 16) {
            if (tid < 8) {
                __update(dists, dists_i, tid, tid + 8);
            }
            __syncthreads();
        }
        if (block_size >= 8) {
            if (tid < 4) {
                __update(dists, dists_i, tid, tid + 4);
            }
            __syncthreads();
        }
        if (block_size >= 4) {
            if (tid < 2) {
                __update(dists, dists_i, tid, tid + 2);
            }
            __syncthreads();
        }
        if (block_size >= 2) {
            if (tid < 1) {
                __update(dists, dists_i, tid, tid + 1);
            }
            __syncthreads();
        }

        old = dists_i[0];
        if (tid == 0)
            idxs[j] = old;
    }
}

void furthest_point_sampling_kernel_launcher(int b, int n, int m,
    const float *dataset, float *temp, int *idxs, cudaStream_t stream) {
    // dataset: (B, N, 3)
    // temp: (B, N)
    // output:
    //      idxs: (B, M)

    cudaError_t err;
    unsigned int n_threads = opt_n_threads(n);

    // dispatch on the block size so the template instantiates the matching reduction
    switch (n_threads) {
        case 1024:
            furthest_point_sampling_kernel<1024><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 512:
            furthest_point_sampling_kernel<512><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 256:
            furthest_point_sampling_kernel<256><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 128:
            furthest_point_sampling_kernel<128><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 64:
            furthest_point_sampling_kernel<64><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 32:
            furthest_point_sampling_kernel<32><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 16:
            furthest_point_sampling_kernel<16><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 8:
            furthest_point_sampling_kernel<8><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 4:
            furthest_point_sampling_kernel<4><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 2:
            furthest_point_sampling_kernel<2><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        case 1:
            furthest_point_sampling_kernel<1><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs); break;
        default:
            furthest_point_sampling_kernel<512><<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
    }

    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
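The kernel above parallelizes the inner distance scan and max-reduction of the classic greedy farthest-point-sampling loop. A serial sketch for a single batch (not part of the commit; temp is assumed pre-filled with a large value by the caller, as the CUDA path also assumes):

// Greedy FPS for one batch; temp[k] caches each point's squared distance
// to the nearest already-selected point.
void fps_reference(int n, int m, const float *dataset, float *temp, int *idxs) {
    int old = 0;
    idxs[0] = old;
    for (int j = 1; j < m; ++j) {
        float best = -1.0f;
        int besti = 0;
        for (int k = 0; k < n; ++k) {
            float dx = dataset[k * 3 + 0] - dataset[old * 3 + 0];
            float dy = dataset[k * 3 + 1] - dataset[old * 3 + 1];
            float dz = dataset[k * 3 + 2] - dataset[old * 3 + 2];
            float d = dx * dx + dy * dy + dz * dz;
            if (d < temp[k]) temp[k] = d;               // refresh nearest-selected distance
            if (temp[k] > best) { best = temp[k]; besti = k; }
        }
        old = besti;                                    // next sample: farthest remaining point
        idxs[j] = old;
    }
}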
29
modules/module_lib/pointnet2_utils/pointnet2/src/sampling_gpu.h
Executable file
@@ -0,0 +1,29 @@
#ifndef _SAMPLING_GPU_H
#define _SAMPLING_GPU_H

#include <torch/serialize/tensor.h>
#include <ATen/cuda/CUDAContext.h>
#include <vector>


int gather_points_wrapper_fast(int b, int c, int n, int npoints,
    at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor);

void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints,
    const float *points, const int *idx, float *out, cudaStream_t stream);


int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints,
    at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor);

void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints,
    const float *grad_out, const int *idx, float *grad_points, cudaStream_t stream);


int furthest_point_sampling_wrapper(int b, int n, int m,
    at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor);

void furthest_point_sampling_kernel_launcher(int b, int n, int m,
    const float *dataset, float *temp, int *idxs, cudaStream_t stream);

#endif