commit 0ea3f048dc
Date: 2024-10-09 16:13:22 +00:00
437 changed files with 44406 additions and 0 deletions


@@ -0,0 +1,10 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <torch/extension.h>
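// Ball query: for each center in new_xyz, collect up to nsample indices of points
// in xyz that lie within radius (indices into the n dimension of xyz).
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample), int32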
at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
const int nsample);


@@ -0,0 +1,46 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#ifndef _CUDA_UTILS_H
#define _CUDA_UTILS_H
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cmath>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#define TOTAL_THREADS 512
inline int opt_n_threads(int work_size) {
const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
return max(min(1 << pow_2, TOTAL_THREADS), 1);
}
inline dim3 opt_block_config(int x, int y) {
const int x_threads = opt_n_threads(x);
const int y_threads =
max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1);
dim3 block_config(x_threads, y_threads, 1);
return block_config;
}
#define CUDA_CHECK_ERRORS() \
do { \
cudaError_t err = cudaGetLastError(); \
if (cudaSuccess != err) { \
fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n", \
cudaGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \
__FILE__); \
exit(-1); \
} \
} while (0)
#endif
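opt_n_threads rounds the work size down to the nearest power of two and clamps it to [1, TOTAL_THREADS]; opt_block_config applies the same rule per axis, giving x priority over y. A minimal host-only sketch of that arithmetic (standalone C++, not part of the extension; the helper name and sample values below are illustrative only):

#include <algorithm>
#include <cmath>
#include <cstdio>

// Host re-statement of opt_n_threads above, with std:: qualifiers so it
// compiles without the CUDA headers. TOTAL_THREADS is 512 as in cuda_utils.h.
static int opt_n_threads_host(int work_size) {
  const int pow_2 =
      static_cast<int>(std::log(static_cast<double>(work_size)) / std::log(2.0));
  return std::max(std::min(1 << pow_2, 512), 1);
}

int main() {
  std::printf("%d\n", opt_n_threads_host(100));   // floor(log2(100)) = 6 -> 64 threads
  std::printf("%d\n", opt_n_threads_host(5000));  // 1 << 12 = 4096, clamped to 512
  std::printf("%d\n", opt_n_threads_host(1));     // never fewer than 1 thread
  return 0;
}

So a ball query over m = 100 centers launches blocks of 64 threads, while any work size of 512 or more saturates at 512 threads per block.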


@@ -0,0 +1,7 @@
// Author: chenxi-wang
#pragma once
#include <torch/extension.h>
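// Cylinder query: like ball_query, but each center also carries a 3x3 rotation
// (rot, stored row-major) defining a local frame; a point is kept if it lies within
// radius of the cylinder axis and its axial coordinate falls in (hmin, hmax).
// input: new_xyz(b, m, 3) xyz(b, n, 3) rot(b, m, 9)
// output: idx(b, m, nsample), int32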
at::Tensor cylinder_query(at::Tensor new_xyz, at::Tensor xyz, at::Tensor rot, const float radius, const float hmin, const float hmax,
const int nsample);


@@ -0,0 +1,10 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <torch/extension.h>
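// group_points gathers per-point features at the given neighbor indices;
// group_points_grad scatters gradients back (n is the original point count).
// input: points(b, c, n) idx(b, npoints, nsample)
// output: out(b, c, npoints, nsample)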
at::Tensor group_points(at::Tensor points, at::Tensor idx);
at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);


@@ -0,0 +1,15 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <torch/extension.h>
#include <vector>
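// three_nn: for each unknown point, the 3 nearest known points.
//   input: unknowns(b, n, 3) knows(b, m, 3)  output: {dist2(b, n, 3), idx(b, n, 3)}
// three_interpolate: weighted sum of 3 neighbor features per output point.
//   input: points(b, c, m) idx(b, n, 3) weight(b, n, 3)  output: out(b, c, n)
// three_interpolate_grad scatters grad_out(b, c, n) back to grad_points(b, c, m).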
std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows);
at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
at::Tensor weight);
at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
at::Tensor weight, const int m);


@@ -0,0 +1,11 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <torch/extension.h>
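// gather_points indexes features points(b, c, n) with idx(b, m) -> out(b, c, m);
// gather_points_grad scatters grad_out back onto the n original points.
// furthest_point_sampling greedily picks nsamples mutually distant points from
// points(b, n, 3) and returns their indices as an int32 tensor of shape (b, nsamples).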
at::Tensor gather_points(at::Tensor points, at::Tensor idx);
at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples);


@@ -0,0 +1,30 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
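// Input-validation helpers shared by the host-side wrappers in this extension.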
#define CHECK_CUDA(x) \
do { \
TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor"); \
} while (0)
#define CHECK_CONTIGUOUS(x) \
do { \
TORCH_CHECK(x.is_contiguous(), #x " must be a contiguous tensor"); \
} while (0)
#define CHECK_IS_INT(x) \
do { \
TORCH_CHECK(x.scalar_type() == at::ScalarType::Int, \
#x " must be an int tensor"); \
} while (0)
#define CHECK_IS_FLOAT(x) \
do { \
TORCH_CHECK(x.scalar_type() == at::ScalarType::Float, \
#x " must be a float tensor"); \
} while (0)


@@ -0,0 +1,37 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include "ball_query.h"
#include "utils.h"
void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
int nsample, const float *new_xyz,
const float *xyz, int *idx);
at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
const int nsample) {
CHECK_CONTIGUOUS(new_xyz);
CHECK_CONTIGUOUS(xyz);
CHECK_IS_FLOAT(new_xyz);
CHECK_IS_FLOAT(xyz);
if (new_xyz.type().is_cuda()) {
CHECK_CUDA(xyz);
}
at::Tensor idx =
torch::zeros({new_xyz.size(0), new_xyz.size(1), nsample},
at::device(new_xyz.device()).dtype(at::ScalarType::Int));
if (new_xyz.type().is_cuda()) {
query_ball_point_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1),
radius, nsample, new_xyz.data<float>(),
xyz.data<float>(), idx.data<int>());
} else {
TORCH_CHECK(false, "CPU not supported");
}
return idx;
}


@@ -0,0 +1,59 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample)
__global__ void query_ball_point_kernel(int b, int n, int m, float radius,
int nsample,
const float *__restrict__ new_xyz,
const float *__restrict__ xyz,
int *__restrict__ idx) {
int batch_index = blockIdx.x;
xyz += batch_index * n * 3;
new_xyz += batch_index * m * 3;
idx += m * nsample * batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
float radius2 = radius * radius;
for (int j = index; j < m; j += stride) {
float new_x = new_xyz[j * 3 + 0];
float new_y = new_xyz[j * 3 + 1];
float new_z = new_xyz[j * 3 + 2];
for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
(new_z - z) * (new_z - z);
if (d2 < radius2) {
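// First hit: pre-fill the whole output row with this index so that queries with
// fewer than nsample neighbors repeat a valid index instead of leaving zeros.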
if (cnt == 0) {
for (int l = 0; l < nsample; ++l) {
idx[j * nsample + l] = k;
}
}
idx[j * nsample + cnt] = k;
++cnt;
}
}
}
}
void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
int nsample, const float *new_xyz,
const float *xyz, int *idx) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
query_ball_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
b, n, m, radius, nsample, new_xyz, xyz, idx);
CUDA_CHECK_ERRORS();
}


@@ -0,0 +1,27 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include "ball_query.h"
#include "group_points.h"
#include "interpolate.h"
#include "sampling.h"
#include "cylinder_query.h"
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("gather_points", &gather_points);
m.def("gather_points_grad", &gather_points_grad);
m.def("furthest_point_sampling", &furthest_point_sampling);
m.def("three_nn", &three_nn);
m.def("three_interpolate", &three_interpolate);
m.def("three_interpolate_grad", &three_interpolate_grad);
m.def("ball_query", &ball_query);
m.def("group_points", &group_points);
m.def("group_points_grad", &group_points_grad);
m.def("cylinder_query", &cylinder_query);
}


@@ -0,0 +1,37 @@
// Author: chenxi-wang
#include "cylinder_query.h"
#include "utils.h"
void query_cylinder_point_kernel_wrapper(int b, int n, int m, float radius, float hmin, float hmax,
int nsample, const float *new_xyz,
const float *xyz, const float *rot, int *idx);
at::Tensor cylinder_query(at::Tensor new_xyz, at::Tensor xyz, at::Tensor rot, const float radius, const float hmin, const float hmax,
const int nsample) {
CHECK_CONTIGUOUS(new_xyz);
CHECK_CONTIGUOUS(xyz);
CHECK_CONTIGUOUS(rot);
CHECK_IS_FLOAT(new_xyz);
CHECK_IS_FLOAT(xyz);
CHECK_IS_FLOAT(rot);
if (new_xyz.type().is_cuda()) {
CHECK_CUDA(xyz);
CHECK_CUDA(rot);
}
at::Tensor idx =
torch::zeros({new_xyz.size(0), new_xyz.size(1), nsample},
at::device(new_xyz.device()).dtype(at::ScalarType::Int));
if (new_xyz.type().is_cuda()) {
query_cylinder_point_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1),
radius, hmin, hmax, nsample, new_xyz.data<float>(),
xyz.data<float>(), rot.data<float>(), idx.data<int>());
} else {
TORCH_CHECK(false, "CPU not supported");
}
return idx;
}


@@ -0,0 +1,67 @@
// Author: chenxi-wang
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
__global__ void query_cylinder_point_kernel(int b, int n, int m, float radius, float hmin, float hmax,
int nsample,
const float *__restrict__ new_xyz,
const float *__restrict__ xyz,
const float *__restrict__ rot,
int *__restrict__ idx) {
int batch_index = blockIdx.x;
xyz += batch_index * n * 3;
new_xyz += batch_index * m * 3;
rot += batch_index * m * 9;
idx += m * nsample * batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
float radius2 = radius * radius;
for (int j = index; j < m; j += stride) {
float new_x = new_xyz[j * 3 + 0];
float new_y = new_xyz[j * 3 + 1];
float new_z = new_xyz[j * 3 + 2];
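// rot holds a row-major 3x3 matrix per query; the offset (x, y, z) computed below
// is projected onto its columns, so x_rot is the coordinate along the cylinder
// axis (checked against hmin/hmax) and (y_rot, z_rot) give the radial distance.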
float r0 = rot[j * 9 + 0];
float r1 = rot[j * 9 + 1];
float r2 = rot[j * 9 + 2];
float r3 = rot[j * 9 + 3];
float r4 = rot[j * 9 + 4];
float r5 = rot[j * 9 + 5];
float r6 = rot[j * 9 + 6];
float r7 = rot[j * 9 + 7];
float r8 = rot[j * 9 + 8];
for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
float x = xyz[k * 3 + 0] - new_x;
float y = xyz[k * 3 + 1] - new_y;
float z = xyz[k * 3 + 2] - new_z;
float x_rot = r0 * x + r3 * y + r6 * z;
float y_rot = r1 * x + r4 * y + r7 * z;
float z_rot = r2 * x + r5 * y + r8 * z;
float d2 = y_rot * y_rot + z_rot * z_rot;
if (d2 < radius2 && x_rot > hmin && x_rot < hmax) {
if (cnt == 0) {
for (int l = 0; l < nsample; ++l) {
idx[j * nsample + l] = k;
}
}
idx[j * nsample + cnt] = k;
++cnt;
}
}
}
}
void query_cylinder_point_kernel_wrapper(int b, int n, int m, float radius, float hmin, float hmax,
int nsample, const float *new_xyz,
const float *xyz, const float *rot, int *idx) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
query_cylinder_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
b, n, m, radius, hmin, hmax, nsample, new_xyz, xyz, rot, idx);
CUDA_CHECK_ERRORS();
}


@@ -0,0 +1,65 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include "group_points.h"
#include "utils.h"
void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
const float *points, const int *idx,
float *out);
void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
int nsample, const float *grad_out,
const int *idx, float *grad_points);
at::Tensor group_points(at::Tensor points, at::Tensor idx) {
CHECK_CONTIGUOUS(points);
CHECK_CONTIGUOUS(idx);
CHECK_IS_FLOAT(points);
CHECK_IS_INT(idx);
if (points.type().is_cuda()) {
CHECK_CUDA(idx);
}
at::Tensor output =
torch::zeros({points.size(0), points.size(1), idx.size(1), idx.size(2)},
at::device(points.device()).dtype(at::ScalarType::Float));
if (points.type().is_cuda()) {
group_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
idx.size(1), idx.size(2), points.data<float>(),
idx.data<int>(), output.data<float>());
} else {
TORCH_CHECK(false, "CPU not supported");
}
return output;
}
at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n) {
CHECK_CONTIGUOUS(grad_out);
CHECK_CONTIGUOUS(idx);
CHECK_IS_FLOAT(grad_out);
CHECK_IS_INT(idx);
if (grad_out.type().is_cuda()) {
CHECK_CUDA(idx);
}
at::Tensor output =
torch::zeros({grad_out.size(0), grad_out.size(1), n},
at::device(grad_out.device()).dtype(at::ScalarType::Float));
if (grad_out.type().is_cuda()) {
group_points_grad_kernel_wrapper(
grad_out.size(0), grad_out.size(1), n, idx.size(1), idx.size(2),
grad_out.data<float>(), idx.data<int>(), output.data<float>());
} else {
TORCH_CHECK(false, "CPU not supported");
}
return output;
}


@@ -0,0 +1,80 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: points(b, c, n) idx(b, npoints, nsample)
// output: out(b, c, npoints, nsample)
__global__ void group_points_kernel(int b, int c, int n, int npoints,
int nsample,
const float *__restrict__ points,
const int *__restrict__ idx,
float *__restrict__ out) {
int batch_index = blockIdx.x;
points += batch_index * n * c;
idx += batch_index * npoints * nsample;
out += batch_index * npoints * nsample * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
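// The 2-D block from opt_block_config is flattened into one strided index over
// (channel, output point) pairs; each thread then copies nsample gathered values.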
for (int i = index; i < c * npoints; i += stride) {
const int l = i / npoints;
const int j = i % npoints;
for (int k = 0; k < nsample; ++k) {
int ii = idx[j * nsample + k];
out[(l * npoints + j) * nsample + k] = points[l * n + ii];
}
}
}
void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
const float *points, const int *idx,
float *out) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
group_points_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
b, c, n, npoints, nsample, points, idx, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample)
// output: grad_points(b, c, n)
__global__ void group_points_grad_kernel(int b, int c, int n, int npoints,
int nsample,
const float *__restrict__ grad_out,
const int *__restrict__ idx,
float *__restrict__ grad_points) {
int batch_index = blockIdx.x;
grad_out += batch_index * npoints * nsample * c;
idx += batch_index * npoints * nsample;
grad_points += batch_index * n * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * npoints; i += stride) {
const int l = i / npoints;
const int j = i % npoints;
for (int k = 0; k < nsample; ++k) {
int ii = idx[j * nsample + k];
atomicAdd(grad_points + l * n + ii,
grad_out[(l * npoints + j) * nsample + k]);
}
}
}
void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
int nsample, const float *grad_out,
const int *idx, float *grad_points) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
group_points_grad_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
b, c, n, npoints, nsample, grad_out, idx, grad_points);
CUDA_CHECK_ERRORS();
}


@@ -0,0 +1,104 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include "interpolate.h"
#include "utils.h"
void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
const float *known, float *dist2, int *idx);
void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
const float *points, const int *idx,
const float *weight, float *out);
void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
const float *grad_out,
const int *idx, const float *weight,
float *grad_points);
std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows) {
CHECK_CONTIGUOUS(unknowns);
CHECK_CONTIGUOUS(knows);
CHECK_IS_FLOAT(unknowns);
CHECK_IS_FLOAT(knows);
if (unknowns.type().is_cuda()) {
CHECK_CUDA(knows);
}
at::Tensor idx =
torch::zeros({unknowns.size(0), unknowns.size(1), 3},
at::device(unknowns.device()).dtype(at::ScalarType::Int));
at::Tensor dist2 =
torch::zeros({unknowns.size(0), unknowns.size(1), 3},
at::device(unknowns.device()).dtype(at::ScalarType::Float));
if (unknowns.type().is_cuda()) {
three_nn_kernel_wrapper(unknowns.size(0), unknowns.size(1), knows.size(1),
unknowns.data<float>(), knows.data<float>(),
dist2.data<float>(), idx.data<int>());
} else {
TORCH_CHECK(false, "CPU not supported");
}
return {dist2, idx};
}
at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
at::Tensor weight) {
CHECK_CONTIGUOUS(points);
CHECK_CONTIGUOUS(idx);
CHECK_CONTIGUOUS(weight);
CHECK_IS_FLOAT(points);
CHECK_IS_INT(idx);
CHECK_IS_FLOAT(weight);
if (points.type().is_cuda()) {
CHECK_CUDA(idx);
CHECK_CUDA(weight);
}
at::Tensor output =
torch::zeros({points.size(0), points.size(1), idx.size(1)},
at::device(points.device()).dtype(at::ScalarType::Float));
if (points.type().is_cuda()) {
three_interpolate_kernel_wrapper(
points.size(0), points.size(1), points.size(2), idx.size(1),
points.data<float>(), idx.data<int>(), weight.data<float>(),
output.data<float>());
} else {
TORCH_CHECK(false, "CPU not supported");
}
return output;
}
at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
at::Tensor weight, const int m) {
CHECK_CONTIGUOUS(grad_out);
CHECK_CONTIGUOUS(idx);
CHECK_CONTIGUOUS(weight);
CHECK_IS_FLOAT(grad_out);
CHECK_IS_INT(idx);
CHECK_IS_FLOAT(weight);
if (grad_out.type().is_cuda()) {
CHECK_CUDA(idx);
CHECK_CUDA(weight);
}
at::Tensor output =
torch::zeros({grad_out.size(0), grad_out.size(1), m},
at::device(grad_out.device()).dtype(at::ScalarType::Float));
if (grad_out.type().is_cuda()) {
three_interpolate_grad_kernel_wrapper(
grad_out.size(0), grad_out.size(1), grad_out.size(2), m,
grad_out.data<float>(), idx.data<int>(), weight.data<float>(),
output.data<float>());
} else {
TORCH_CHECK(false, "CPU not supported");
}
return output;
}


@@ -0,0 +1,159 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: unknown(b, n, 3) known(b, m, 3)
// output: dist2(b, n, 3), idx(b, n, 3)
__global__ void three_nn_kernel(int b, int n, int m,
const float *__restrict__ unknown,
const float *__restrict__ known,
float *__restrict__ dist2,
int *__restrict__ idx) {
int batch_index = blockIdx.x;
unknown += batch_index * n * 3;
known += batch_index * m * 3;
dist2 += batch_index * n * 3;
idx += batch_index * n * 3;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j = index; j < n; j += stride) {
float ux = unknown[j * 3 + 0];
float uy = unknown[j * 3 + 1];
float uz = unknown[j * 3 + 2];
double best1 = 1e40, best2 = 1e40, best3 = 1e40;
int besti1 = 0, besti2 = 0, besti3 = 0;
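// Insertion-style update of the three smallest squared distances seen so far.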
for (int k = 0; k < m; ++k) {
float x = known[k * 3 + 0];
float y = known[k * 3 + 1];
float z = known[k * 3 + 2];
float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
if (d < best1) {
best3 = best2;
besti3 = besti2;
best2 = best1;
besti2 = besti1;
best1 = d;
besti1 = k;
} else if (d < best2) {
best3 = best2;
besti3 = besti2;
best2 = d;
besti2 = k;
} else if (d < best3) {
best3 = d;
besti3 = k;
}
}
dist2[j * 3 + 0] = best1;
dist2[j * 3 + 1] = best2;
dist2[j * 3 + 2] = best3;
idx[j * 3 + 0] = besti1;
idx[j * 3 + 1] = besti2;
idx[j * 3 + 2] = besti3;
}
}
void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
const float *known, float *dist2, int *idx) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
three_nn_kernel<<<b, opt_n_threads(n), 0, stream>>>(b, n, m, unknown, known,
dist2, idx);
CUDA_CHECK_ERRORS();
}
// input: points(b, c, m), idx(b, n, 3), weight(b, n, 3)
// output: out(b, c, n)
__global__ void three_interpolate_kernel(int b, int c, int m, int n,
const float *__restrict__ points,
const int *__restrict__ idx,
const float *__restrict__ weight,
float *__restrict__ out) {
int batch_index = blockIdx.x;
points += batch_index * m * c;
idx += batch_index * n * 3;
weight += batch_index * n * 3;
out += batch_index * n * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * n; i += stride) {
const int l = i / n;
const int j = i % n;
float w1 = weight[j * 3 + 0];
float w2 = weight[j * 3 + 1];
float w3 = weight[j * 3 + 2];
int i1 = idx[j * 3 + 0];
int i2 = idx[j * 3 + 1];
int i3 = idx[j * 3 + 2];
out[i] = points[l * m + i1] * w1 + points[l * m + i2] * w2 +
points[l * m + i3] * w3;
}
}
void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
const float *points, const int *idx,
const float *weight, float *out) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
three_interpolate_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
b, c, m, n, points, idx, weight, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, n), idx(b, n, 3), weight(b, n, 3)
// output: grad_points(b, c, m)
__global__ void three_interpolate_grad_kernel(
int b, int c, int n, int m, const float *__restrict__ grad_out,
const int *__restrict__ idx, const float *__restrict__ weight,
float *__restrict__ grad_points) {
int batch_index = blockIdx.x;
grad_out += batch_index * n * c;
idx += batch_index * n * 3;
weight += batch_index * n * 3;
grad_points += batch_index * m * c;
const int index = threadIdx.y * blockDim.x + threadIdx.x;
const int stride = blockDim.y * blockDim.x;
for (int i = index; i < c * n; i += stride) {
const int l = i / n;
const int j = i % n;
float w1 = weight[j * 3 + 0];
float w2 = weight[j * 3 + 1];
float w3 = weight[j * 3 + 2];
int i1 = idx[j * 3 + 0];
int i2 = idx[j * 3 + 1];
int i3 = idx[j * 3 + 2];
atomicAdd(grad_points + l * m + i1, grad_out[i] * w1);
atomicAdd(grad_points + l * m + i2, grad_out[i] * w2);
atomicAdd(grad_points + l * m + i3, grad_out[i] * w3);
}
}
void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
const float *grad_out,
const int *idx, const float *weight,
float *grad_points) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
three_interpolate_grad_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
b, c, n, m, grad_out, idx, weight, grad_points);
CUDA_CHECK_ERRORS();
}


@@ -0,0 +1,91 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include "sampling.h"
#include "utils.h"
void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
const float *points, const int *idx,
float *out);
void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
const float *grad_out, const int *idx,
float *grad_points);
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
const float *dataset, float *temp,
int *idxs);
at::Tensor gather_points(at::Tensor points, at::Tensor idx) {
CHECK_CONTIGUOUS(points);
CHECK_CONTIGUOUS(idx);
CHECK_IS_FLOAT(points);
CHECK_IS_INT(idx);
if (points.type().is_cuda()) {
CHECK_CUDA(idx);
}
at::Tensor output =
torch::zeros({points.size(0), points.size(1), idx.size(1)},
at::device(points.device()).dtype(at::ScalarType::Float));
if (points.type().is_cuda()) {
gather_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
idx.size(1), points.data<float>(),
idx.data<int>(), output.data<float>());
} else {
TORCH_CHECK(false, "CPU not supported");
}
return output;
}
at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx,
const int n) {
CHECK_CONTIGUOUS(grad_out);
CHECK_CONTIGUOUS(idx);
CHECK_IS_FLOAT(grad_out);
CHECK_IS_INT(idx);
if (grad_out.type().is_cuda()) {
CHECK_CUDA(idx);
}
at::Tensor output =
torch::zeros({grad_out.size(0), grad_out.size(1), n},
at::device(grad_out.device()).dtype(at::ScalarType::Float));
if (grad_out.type().is_cuda()) {
gather_points_grad_kernel_wrapper(grad_out.size(0), grad_out.size(1), n,
idx.size(1), grad_out.data<float>(),
idx.data<int>(), output.data<float>());
} else {
TORCH_CHECK(false, "CPU not supported");
}
return output;
}
at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) {
CHECK_CONTIGUOUS(points);
CHECK_IS_FLOAT(points);
at::Tensor output =
torch::zeros({points.size(0), nsamples},
at::device(points.device()).dtype(at::ScalarType::Int));
at::Tensor tmp =
torch::full({points.size(0), points.size(1)}, 1e10,
at::device(points.device()).dtype(at::ScalarType::Float));
if (points.type().is_cuda()) {
furthest_point_sampling_kernel_wrapper(
points.size(0), points.size(1), nsamples, points.data<float>(),
tmp.data<float>(), output.data<int>());
} else {
TORCH_CHECK(false, "CPU not supported");
}
return output;
}


@@ -0,0 +1,234 @@
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: points(b, c, n) idx(b, m)
// output: out(b, c, m)
__global__ void gather_points_kernel(int b, int c, int n, int m,
const float *__restrict__ points,
const int *__restrict__ idx,
float *__restrict__ out) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int l = blockIdx.y; l < c; l += gridDim.y) {
for (int j = threadIdx.x; j < m; j += blockDim.x) {
int a = idx[i * m + j];
out[(i * c + l) * m + j] = points[(i * c + l) * n + a];
}
}
}
}
void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
const float *points, const int *idx,
float *out) {
gather_points_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
at::cuda::getCurrentCUDAStream()>>>(b, c, n, npoints,
points, idx, out);
CUDA_CHECK_ERRORS();
}
// input: grad_out(b, c, m) idx(b, m)
// output: grad_points(b, c, n)
__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
const float *__restrict__ grad_out,
const int *__restrict__ idx,
float *__restrict__ grad_points) {
for (int i = blockIdx.x; i < b; i += gridDim.x) {
for (int l = blockIdx.y; l < c; l += gridDim.y) {
for (int j = threadIdx.x; j < m; j += blockDim.x) {
int a = idx[i * m + j];
atomicAdd(grad_points + (i * c + l) * n + a,
grad_out[(i * c + l) * m + j]);
}
}
}
}
void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
const float *grad_out, const int *idx,
float *grad_points) {
gather_points_grad_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
at::cuda::getCurrentCUDAStream()>>>(
b, c, n, npoints, grad_out, idx, grad_points);
CUDA_CHECK_ERRORS();
}
__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
int idx1, int idx2) {
const float v1 = dists[idx1], v2 = dists[idx2];
const int i1 = dists_i[idx1], i2 = dists_i[idx2];
dists[idx1] = max(v1, v2);
dists_i[idx1] = v2 > v1 ? i2 : i1;
}
// input: dataset(b, n, 3), temp(b, n)
// output: idxs(b, m)
template <unsigned int block_size>
__global__ void furthest_point_sampling_kernel(
int b, int n, int m, const float *__restrict__ dataset,
float *__restrict__ temp, int *__restrict__ idxs) {
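// temp[k] caches the squared distance from point k to the nearest already-selected
// sample. Each iteration, threads scan a strided slice of the points to update temp,
// then the block reduces in shared memory to find the point farthest from the
// current sample set; that point becomes the next index.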
if (m <= 0) return;
__shared__ float dists[block_size];
__shared__ int dists_i[block_size];
int batch_index = blockIdx.x;
dataset += batch_index * n * 3;
temp += batch_index * n;
idxs += batch_index * m;
int tid = threadIdx.x;
const int stride = block_size;
int old = 0;
if (threadIdx.x == 0) idxs[0] = old;
__syncthreads();
for (int j = 1; j < m; j++) {
int besti = 0;
float best = -1;
float x1 = dataset[old * 3 + 0];
float y1 = dataset[old * 3 + 1];
float z1 = dataset[old * 3 + 2];
for (int k = tid; k < n; k += stride) {
float x2, y2, z2;
x2 = dataset[k * 3 + 0];
y2 = dataset[k * 3 + 1];
z2 = dataset[k * 3 + 2];
float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
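// Points with squared norm <= 1e-3 (i.e., at or very near the origin) are skipped.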
if (mag <= 1e-3) continue;
float d =
(x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
float d2 = min(d, temp[k]);
temp[k] = d2;
besti = d2 > best ? k : besti;
best = d2 > best ? d2 : best;
}
dists[tid] = best;
dists_i[tid] = besti;
__syncthreads();
if (block_size >= 512) {
if (tid < 256) {
__update(dists, dists_i, tid, tid + 256);
}
__syncthreads();
}
if (block_size >= 256) {
if (tid < 128) {
__update(dists, dists_i, tid, tid + 128);
}
__syncthreads();
}
if (block_size >= 128) {
if (tid < 64) {
__update(dists, dists_i, tid, tid + 64);
}
__syncthreads();
}
if (block_size >= 64) {
if (tid < 32) {
__update(dists, dists_i, tid, tid + 32);
}
__syncthreads();
}
if (block_size >= 32) {
if (tid < 16) {
__update(dists, dists_i, tid, tid + 16);
}
__syncthreads();
}
if (block_size >= 16) {
if (tid < 8) {
__update(dists, dists_i, tid, tid + 8);
}
__syncthreads();
}
if (block_size >= 8) {
if (tid < 4) {
__update(dists, dists_i, tid, tid + 4);
}
__syncthreads();
}
if (block_size >= 4) {
if (tid < 2) {
__update(dists, dists_i, tid, tid + 2);
}
__syncthreads();
}
if (block_size >= 2) {
if (tid < 1) {
__update(dists, dists_i, tid, tid + 1);
}
__syncthreads();
}
old = dists_i[0];
if (tid == 0) idxs[j] = old;
}
}
void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
const float *dataset, float *temp,
int *idxs) {
unsigned int n_threads = opt_n_threads(n);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
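// block_size is a template parameter so the reduction's if (block_size >= ...) steps
// resolve at compile time; dispatch on the launch width chosen by opt_n_threads.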
switch (n_threads) {
case 512:
furthest_point_sampling_kernel<512>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 256:
furthest_point_sampling_kernel<256>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 128:
furthest_point_sampling_kernel<128>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 64:
furthest_point_sampling_kernel<64>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 32:
furthest_point_sampling_kernel<32>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 16:
furthest_point_sampling_kernel<16>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 8:
furthest_point_sampling_kernel<8>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 4:
furthest_point_sampling_kernel<4>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 2:
furthest_point_sampling_kernel<2>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
case 1:
furthest_point_sampling_kernel<1>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
break;
default:
furthest_point_sampling_kernel<512>
<<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
}
CUDA_CHECK_ERRORS();
}