Compare commits: master...ab_global_

4 Commits:
- 1123e69bff
- 5e8684d149
- 96fa40cc35
- b82b92eebb
```diff
@@ -6,17 +6,17 @@ runner:
   cuda_visible_devices: "0,1,2,3,4,5,6,7"
 
   experiment:
-    name: debug
+    name: server_split_dataset
     root_dir: "experiments"
 
   split: #
-    root_dir: "/data/hofee/data/packed_preprocessed_data"
+    root_dir: "/data/hofee/data/new_full_data"
     type: "unseen_instance" # "unseen_category"
     datasets:
       OmniObject3d_train:
-        path: "/data/hofee/data/OmniObject3d_train.txt"
+        path: "/data/hofee/data/new_full_data_list/OmniObject3d_train.txt"
        ratio: 0.9

      OmniObject3d_test:
-        path: "/data/hofee/data/OmniObject3d_test.txt"
+        path: "/data/hofee/data/new_full_data_list/OmniObject3d_test.txt"
         ratio: 0.1
```
```diff
@@ -7,7 +7,7 @@ runner:
   parallel: False
 
   experiment:
-    name: debug
+    name: train_ab_global_and_partial_global
     root_dir: "experiments"
     use_checkpoint: False
     epoch: -1 # -1 stands for last epoch
```
```diff
@@ -28,50 +28,50 @@ runner:
     #- OmniObject3d_test
     - OmniObject3d_val
 
-  pipeline: nbv_reconstruction_global_pts_n_num_pipeline
+  pipeline: nbv_reconstruction_pipeline
 
 dataset:
   OmniObject3d_train:
-    root_dir: "/home/data/hofee/project/nbv_rec/data/sample_for_training_new"
+    root_dir: "/data/hofee/data/new_full_data"
     model_dir: "../data/scaled_object_meshes"
     source: nbv_reconstruction_dataset
-    split_file: "/home/data/hofee/project/nbv_rec/data/sample.txt"
+    split_file: "/data/hofee/data/new_full_data_list/OmniObject3d_train.txt"
     type: train
     cache: True
     ratio: 1
-    batch_size: 160
-    num_workers: 16
+    batch_size: 80
+    num_workers: 128
     pts_num: 8192
     load_from_preprocess: True
 
   OmniObject3d_test:
-    root_dir: "/home/data/hofee/project/nbv_rec/data/sample_for_training_new"
+    root_dir: "/data/hofee/data/new_full_data"
     model_dir: "../data/scaled_object_meshes"
     source: nbv_reconstruction_dataset
-    split_file: "/home/data/hofee/project/nbv_rec/data/sample.txt"
+    split_file: "/data/hofee/data/new_full_data_list/OmniObject3d_test.txt"
     type: test
     cache: True
     filter_degree: 75
     eval_list:
       - pose_diff
-    ratio: 0.05
-    batch_size: 160
+    ratio: 1
+    batch_size: 80
     num_workers: 12
     pts_num: 8192
     load_from_preprocess: True
 
   OmniObject3d_val:
-    root_dir: "/home/data/hofee/project/nbv_rec/data/sample_for_training_new"
+    root_dir: "/data/hofee/data/new_full_data"
     model_dir: "../data/scaled_object_meshes"
     source: nbv_reconstruction_dataset
-    split_file: "/home/data/hofee/project/nbv_rec/data/sample.txt"
+    split_file: "/data/hofee/data/new_full_data_list/OmniObject3d_train.txt"
     type: test
     cache: True
     filter_degree: 75
     eval_list:
       - pose_diff
-    ratio: 0.005
-    batch_size: 160
+    ratio: 0.1
+    batch_size: 80
     num_workers: 12
     pts_num: 8192
     load_from_preprocess: True
```
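The `ratio` knob above appears to control what fraction of each split list is actually used (train keeps the full list at `ratio: 1`, while `OmniObject3d_val` reuses the train split file at `ratio: 0.1`). The loader that consumes it is not part of this diff, so the following is only a hedged sketch of how such a knob is conventionally applied; `apply_ratio` is a hypothetical helper, not the repo's code:

```python
import numpy as np

def apply_ratio(datalist, ratio, seed=0):
    # Hypothetical: keep a deterministic fraction of a split list.
    # The real NBVReconstructionDataset may subsample differently.
    rng = np.random.default_rng(seed)
    keep = max(1, int(len(datalist) * ratio))
    idx = rng.choice(len(datalist), keep, replace=False)
    return [datalist[i] for i in sorted(idx)]

# OmniObject3d_val reuses the train list with ratio 0.1, so it would
# validate on roughly 10% of the training scenes.
```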
```diff
@@ -97,7 +97,7 @@ module:
     feature_transform: False
 
   transformer_seq_encoder:
-    embed_dim: 256
+    embed_dim: 320
     num_heads: 4
     ffn_dim: 256
     num_layers: 3
```
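The `embed_dim` bump from 256 to 320 tracks the pipeline change further down: each sequence element becomes the concatenation of a per-view point feature and a pose feature, so the transformer's input width must equal the sum of the two. A sanity-check sketch; the individual widths below are assumptions, only their sum is pinned by this config:

```python
import torch

pose_feat_dim = 256     # assumed pose-encoder output width
partial_feat_dim = 64   # assumed per-view pooled point-feature width

seq_embedding = torch.cat(
    [torch.zeros(5, partial_feat_dim), torch.zeros(5, pose_feat_dim)], dim=-1
)
# Must match transformer_seq_encoder.embed_dim above.
assert seq_embedding.shape[-1] == 320
```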
```diff
@@ -7,6 +7,7 @@ from PytorchBoot.utils.log_util import Log
 import torch
 import os
 import sys
+import time
 
 sys.path.append(r"/data/hofee/project/nbv_rec/nbv_reconstruction")
 
```
```diff
@@ -34,7 +35,7 @@ class NBVReconstructionDataset(BaseDataset):
         #self.model_dir = config["model_dir"]
         self.filter_degree = config["filter_degree"]
         if self.type == namespace.Mode.TRAIN:
-            scale_ratio = 100
+            scale_ratio = 1
             self.datalist = self.datalist*scale_ratio
         if self.cache:
             expr_root = ConfigManager.get("runner", "experiment", "root_dir")
```
```diff
@@ -114,8 +115,13 @@ class NBVReconstructionDataset(BaseDataset):
         except Exception as e:
             Log.error(f"Save cache failed: {e}")
 
-    def voxel_downsample_with_mask(self, pts, voxel_size):
-        pass
+    def voxel_downsample_with_mapping(self, point_cloud, voxel_size=0.003):
+        voxel_indices = np.floor(point_cloud / voxel_size).astype(np.int32)
+        unique_voxels, inverse, counts = np.unique(voxel_indices, axis=0, return_inverse=True, return_counts=True)
+        idx_sort = np.argsort(inverse)
+        idx_unique = idx_sort[np.cumsum(counts)-counts]
+        downsampled_points = point_cloud[idx_unique]
+        return downsampled_points, inverse
 
 
     def __getitem__(self, index):
```
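The new `voxel_downsample_with_mapping` keeps one representative point per occupied voxel and, unlike a plain voxel filter, also returns `inverse`, which maps every original point to its voxel's index in the downsampled cloud. A minimal standalone check of the `np.unique` trick it relies on (toy data; names mirror the hunk):

```python
import numpy as np

voxel_size = 0.003
pts = np.array([[0.001, 0.0, 0.0],    # voxel (0, 0, 0)
                [0.002, 0.0, 0.0],    # voxel (0, 0, 0) again
                [0.010, 0.0, 0.0]])   # voxel (3, 0, 0)

voxel_indices = np.floor(pts / voxel_size).astype(np.int32)
unique_voxels, inverse, counts = np.unique(
    voxel_indices, axis=0, return_inverse=True, return_counts=True)

# Sort points by voxel id, then pick the element sitting at each voxel's
# starting offset (cumsum(counts) - counts) as that voxel's representative.
idx_sort = np.argsort(inverse)
idx_unique = idx_sort[np.cumsum(counts) - counts]

print(pts[idx_unique])  # one point per occupied voxel -> 2 points
print(inverse)          # [0 0 1]: original point -> downsampled index
```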
```diff
@@ -129,6 +135,9 @@ class NBVReconstructionDataset(BaseDataset):
             scanned_coverages_rate,
             scanned_n_to_world_pose,
         ) = ([], [], [])
+        start_time = time.time()
+        start_indices = [0]
+        total_points = 0
         for view in scanned_views:
             frame_idx = view[0]
             coverage_rate = view[1]
```
```diff
@@ -150,8 +159,12 @@ class NBVReconstructionDataset(BaseDataset):
             n_to_world_trans = n_to_world_pose[:3, 3]
             n_to_world_9d = np.concatenate([n_to_world_6d, n_to_world_trans], axis=0)
             scanned_n_to_world_pose.append(n_to_world_9d)
+            total_points += len(downsampled_target_point_cloud)
+            start_indices.append(total_points)
 
 
+        end_time = time.time()
+        #Log.info(f"load data time: {end_time - start_time}")
         nbv_idx, nbv_coverage_rate = nbv[0], nbv[1]
         nbv_path = DataLoadUtil.get_path(self.root_dir, scene_name, nbv_idx)
         cam_info = DataLoadUtil.load_cam_info(nbv_path)
```
```diff
@@ -166,12 +179,25 @@ class NBVReconstructionDataset(BaseDataset):
         )
 
         combined_scanned_views_pts = np.concatenate(scanned_views_pts, axis=0)
-        voxel_downsampled_combined_scanned_pts_np = PtsUtil.voxel_downsample_point_cloud(combined_scanned_views_pts, 0.002)
-        random_downsampled_combined_scanned_pts_np = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, self.pts_num)
+        voxel_downsampled_combined_scanned_pts_np, inverse = self.voxel_downsample_with_mapping(combined_scanned_views_pts, 0.003)
+        random_downsampled_combined_scanned_pts_np, random_downsample_idx = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, self.pts_num, require_idx=True)
 
+        all_idx_unique = np.arange(len(voxel_downsampled_combined_scanned_pts_np))
+        all_random_downsample_idx = all_idx_unique[random_downsample_idx]
+        scanned_pts_mask = []
+        for idx, start_idx in enumerate(start_indices):
+            if idx == len(start_indices) - 1:
+                break
+            end_idx = start_indices[idx+1]
+            view_inverse = inverse[start_idx:end_idx]
+            view_unique_downsampled_idx = np.unique(view_inverse)
+            view_unique_downsampled_idx_set = set(view_unique_downsampled_idx)
+            mask = np.array([idx in view_unique_downsampled_idx_set for idx in all_random_downsample_idx])
+            scanned_pts_mask.append(mask)
+
         data_item = {
             "scanned_pts": np.asarray(scanned_views_pts, dtype=np.float32), # Ndarray(S x Nv x 3)
             "combined_scanned_pts": np.asarray(random_downsampled_combined_scanned_pts_np, dtype=np.float32), # Ndarray(N x 3)
+            "scanned_pts_mask": np.asarray(scanned_pts_mask, dtype=np.bool), # Ndarray(N)
             "scanned_coverage_rate": scanned_coverages_rate, # List(S): Float, range(0, 1)
             "scanned_n_to_world_pose_9d": np.asarray(scanned_n_to_world_pose, dtype=np.float32), # Ndarray(S x 9)
             "best_coverage_rate": nbv_coverage_rate, # Float, range(0, 1)
```
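Taken together, `start_indices` (built in the two hunks above), `inverse`, and `random_downsample_idx` let `__getitem__` recover which of the final sampled points each view contributed to. A condensed sketch of the mask-building chain on toy shapes; note the masks are not disjoint, since a voxel seen from several views is flagged in every one of their masks:

```python
import numpy as np

start_indices = [0, 120, 250]  # view 0 owns pts 0..119, view 1 owns 120..249
inverse = np.random.randint(0, 80, 250)        # original point -> voxel index
random_downsample_idx = np.random.choice(80, 40, replace=True)  # final sampling

scanned_pts_mask = []
for i in range(len(start_indices) - 1):
    # Voxels touched by view i's slice of the concatenated cloud.
    view_voxels = set(np.unique(inverse[start_indices[i]:start_indices[i + 1]]))
    # A sampled point belongs to view i iff its voxel was touched by view i.
    mask = np.array([v in view_voxels for v in random_downsample_idx])
    scanned_pts_mask.append(mask)  # one bool mask of length pts_num per view
```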
```diff
@@ -197,7 +223,9 @@ class NBVReconstructionDataset(BaseDataset):
         collate_data["scanned_n_to_world_pose_9d"] = [
             torch.tensor(item["scanned_n_to_world_pose_9d"]) for item in batch
         ]
+        collate_data["scanned_pts_mask"] = [
+            torch.tensor(item["scanned_pts_mask"]) for item in batch
+        ]
         ''' ------ Fixed Length ------ '''
 
         collate_data["best_to_world_pose_9d"] = torch.stack(
```
```diff
@@ -206,17 +234,14 @@ class NBVReconstructionDataset(BaseDataset):
         collate_data["combined_scanned_pts"] = torch.stack(
             [torch.tensor(item["combined_scanned_pts"]) for item in batch]
         )
-        collate_data["scanned_pts_mask"] = torch.stack(
-            [torch.tensor(item["scanned_pts_mask"]) for item in batch]
-        )
 
         for key in batch[0].keys():
             if key not in [
                 "scanned_pts",
-                "scanned_pts_mask",
                 "scanned_n_to_world_pose_9d",
                 "best_to_world_pose_9d",
                 "combined_scanned_pts",
+                "scanned_pts_mask",
             ]:
                 collate_data[key] = [item[key] for item in batch]
         return collate_data
```
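The two hunks above move `scanned_pts_mask` from the fixed-length `torch.stack` branch into the variable-length list branch. That is forced by its shape: each sample carries S masks and S (the number of scanned views) varies across samples, so stacking raises on ragged batches. A micro-example of the failure mode:

```python
import torch

masks_a = torch.zeros(2, 8192, dtype=torch.bool)  # sample with S=2 views
masks_b = torch.zeros(3, 8192, dtype=torch.bool)  # sample with S=3 views

try:
    torch.stack([masks_a, masks_b])       # what the old collate_fn did
except RuntimeError as e:
    print("stack fails on ragged batch:", e)

batch_masks = [masks_a, masks_b]          # what the new collate_fn keeps
```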
```diff
@@ -232,9 +257,9 @@ if __name__ == "__main__":
     torch.manual_seed(seed)
     np.random.seed(seed)
     config = {
-        "root_dir": "/data/hofee/data/packed_preprocessed_data",
+        "root_dir": "/data/hofee/nbv_rec_part2_preprocessed",
         "source": "nbv_reconstruction_dataset",
-        "split_file": "/data/hofee/data/OmniObject3d_train.txt",
+        "split_file": "/data/hofee/data/sample.txt",
         "load_from_preprocess": True,
         "ratio": 0.5,
         "batch_size": 2,
```
```diff
@@ -20,8 +20,8 @@ class NBVReconstructionPipeline(nn.Module):
         self.pose_encoder = ComponentFactory.create(
             namespace.Stereotype.MODULE, self.module_config["pose_encoder"]
         )
-        self.transformer_seq_encoder = ComponentFactory.create(
-            namespace.Stereotype.MODULE, self.module_config["transformer_seq_encoder"]
+        self.seq_encoder = ComponentFactory.create(
+            namespace.Stereotype.MODULE, self.module_config["seq_encoder"]
         )
         self.view_finder = ComponentFactory.create(
             namespace.Stereotype.MODULE, self.module_config["view_finder"]
```
```diff
@@ -54,10 +54,7 @@ class NBVReconstructionPipeline(nn.Module):
         return perturbed_x, random_t, target_score, std
 
     def forward_train(self, data):
-        start_time = time.time()
         main_feat = self.get_main_feat(data)
-        end_time = time.time()
-        print("get_main_feat time: ", end_time - start_time)
         """ get std """
         best_to_world_pose_9d_batch = data["best_to_world_pose_9d"]
         perturbed_x, random_t, target_score, std = self.pertube_data(
```
```diff
@@ -92,25 +89,49 @@ class NBVReconstructionPipeline(nn.Module):
             "scanned_n_to_world_pose_9d"
         ] # List(B): Tensor(S x 9)
 
+        scanned_pts_mask_batch = data["scanned_pts_mask"] # List(B): Tensor(N)
+
         device = next(self.parameters()).device
 
         embedding_list_batch = []
 
         combined_scanned_pts_batch = data["combined_scanned_pts"] # Tensor(B x N x 3)
-        global_scanned_feat = self.pts_encoder.encode_points(
-            combined_scanned_pts_batch, require_per_point_feat=False
+        global_scanned_feat, per_point_feat_batch = self.pts_encoder.encode_points(
+            combined_scanned_pts_batch, require_per_point_feat=True
         ) # global_scanned_feat: Tensor(B x Dg)
+        batch_size = len(scanned_n_to_world_pose_9d_batch)
+        for i in range(batch_size):
+            seq_len = len(scanned_n_to_world_pose_9d_batch[i])
+            scanned_n_to_world_pose_9d = scanned_n_to_world_pose_9d_batch[i].to(device) # Tensor(S x 9)
+            scanned_pts_mask = scanned_pts_mask_batch[i] # Tensor(S x N)
+            per_point_feat = per_point_feat_batch[i] # Tensor(N x Dp)
+            partial_point_feat_seq = []
+            for j in range(seq_len):
+                partial_per_point_feat = per_point_feat[scanned_pts_mask[j]]
+                if partial_per_point_feat.shape[0] == 0:
+                    partial_point_feat = torch.zeros(per_point_feat.shape[1], device=device)
+                else:
+                    partial_point_feat = torch.mean(partial_per_point_feat, dim=0) # Tensor(Dp)
+                partial_point_feat_seq.append(partial_point_feat)
+            partial_point_feat_seq = torch.stack(partial_point_feat_seq, dim=0) # Tensor(S x Dp)
 
-        for scanned_n_to_world_pose_9d in scanned_n_to_world_pose_9d_batch:
-            scanned_n_to_world_pose_9d = scanned_n_to_world_pose_9d.to(device) # Tensor(S x 9)
             pose_feat_seq = self.pose_encoder.encode_pose(scanned_n_to_world_pose_9d) # Tensor(S x Dp)
-            seq_embedding = pose_feat_seq
+            seq_embedding = torch.cat([partial_point_feat_seq, pose_feat_seq], dim=-1)
             embedding_list_batch.append(seq_embedding) # List(B): Tensor(S x (Dp))
 
-        seq_feat = self.transformer_seq_encoder.encode_sequence(embedding_list_batch) # Tensor(B x Ds)
+        seq_feat = self.seq_encoder.encode_sequence(embedding_list_batch) # Tensor(B x Ds)
         main_feat = torch.cat([seq_feat, global_scanned_feat], dim=-1) # Tensor(B x (Ds+Dg))
 
         if torch.isnan(main_feat).any():
+            for i in range(len(main_feat)):
+                if torch.isnan(main_feat[i]).any():
+                    scanned_pts_mask = scanned_pts_mask_batch[i]
+                    Log.info(f"scanned_pts_mask shape: {scanned_pts_mask.shape}")
+                    Log.info(f"scanned_pts_mask sum: {scanned_pts_mask.sum()}")
+                    import ipdb
+                    ipdb.set_trace()
             Log.error("nan in main_feat", True)
 
         return main_feat
```
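The rewritten `get_main_feat` mean-pools per-point features over each view's mask, producing a per-view "partial global" descriptor that is concatenated with the pose embedding (hence the `embed_dim` change in the config above). A self-contained sketch of that masked pooling for one sample; shapes follow the hunk's comments, and `Dp = 64` is an assumption:

```python
import torch

S, N, Dp = 4, 8192, 64                    # views, sampled points, feature width
per_point_feat = torch.randn(N, Dp)       # one sample's per-point features
scanned_pts_mask = torch.zeros(S, N, dtype=torch.bool)
scanned_pts_mask[:, :100] = True          # toy masks

partial_feats = []
for j in range(S):
    sel = per_point_feat[scanned_pts_mask[j]]  # points view j contributed
    if sel.shape[0] == 0:
        # A view may cover none of the sampled points; fall back to zeros,
        # as the pipeline does, to keep the sequence length intact.
        partial_feats.append(per_point_feat.new_zeros(Dp))
    else:
        partial_feats.append(sel.mean(dim=0))
partial_point_feat_seq = torch.stack(partial_feats)  # Tensor(S x Dp)
```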
utils/pts.py (30 changed lines):
```diff
@@ -14,16 +14,38 @@ class PtsUtil:
             downsampled_points = point_cloud[idx_unique]
             return downsampled_points, idx_unique
         else:
-            unique_voxels = np.unique(voxel_indices, axis=0, return_inverse=True)
-            return unique_voxels[0]*voxel_size
+            import ipdb; ipdb.set_trace()
+            unique_voxels = np.unique(voxel_indices, axis=0, return_inverse=False)
+            return unique_voxels*voxel_size
 
     @staticmethod
-    def random_downsample_point_cloud(point_cloud, num_points, require_idx=False):
+    def voxel_downsample_point_cloud_o3d(point_cloud, voxel_size=0.005):
+        pcd = o3d.geometry.PointCloud()
+        pcd.points = o3d.utility.Vector3dVector(point_cloud)
+        pcd = pcd.voxel_down_sample(voxel_size)
+        return np.asarray(pcd.points)
+
+    @staticmethod
+    def voxel_downsample_point_cloud_and_trace_o3d(point_cloud, voxel_size=0.005):
+        pcd = o3d.geometry.PointCloud()
+        pcd.points = o3d.utility.Vector3dVector(point_cloud)
+        max_bound = pcd.get_max_bound()
+        min_bound = pcd.get_min_bound()
+        pcd = pcd.voxel_down_sample_and_trace(voxel_size, max_bound, min_bound, True)
+
+        return np.asarray(pcd.points)
+
+    @staticmethod
+    def random_downsample_point_cloud(point_cloud, num_points, require_idx=False, replace=True):
         if point_cloud.shape[0] == 0:
             if require_idx:
                 return point_cloud, np.array([])
             return point_cloud
-        idx = np.random.choice(len(point_cloud), num_points, replace=True)
+        if not replace and num_points > len(point_cloud):
+            if require_idx:
+                return point_cloud, np.arange(len(point_cloud))
+            return point_cloud
+        idx = np.random.choice(len(point_cloud), num_points, replace=replace)
         if require_idx:
             return point_cloud[idx], idx
         return point_cloud[idx]
```
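Usage sketch for the new `replace` flag (assuming the repo root is on `sys.path`): with `replace=False`, a request larger than the cloud now returns the cloud unchanged instead of raising inside `np.random.choice`:

```python
import numpy as np
from utils.pts import PtsUtil  # assumed import path for this repo

cloud = np.random.rand(100, 3)

# Old default behavior, unchanged: sample with replacement, duplicates allowed.
pts = PtsUtil.random_downsample_point_cloud(cloud, 8192)

# Strict subsampling: never duplicate, degrade gracefully on small clouds.
pts, idx = PtsUtil.random_downsample_point_cloud(
    cloud, 8192, require_idx=True, replace=False)
assert len(pts) == 100 and np.array_equal(idx, np.arange(100))
```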