change config and remove online evaluation

This commit is contained in:
hofee 2024-09-20 11:49:32 +00:00
parent 8517255245
commit 6cdff9c83f
4 changed files with 34 additions and 34 deletions

View File

@@ -5,5 +5,5 @@ from runners.data_spliter import DataSpliter
class DataSplitApp:
@staticmethod
def start():
DataSpliter("configs/split_dataset_config.yaml").run()
DataSpliter("configs/server/split_dataset_config.yaml").run()

View File

@@ -10,13 +10,13 @@ runner:
root_dir: "experiments"
split:
root_dir: "../data/sample_for_training/scenes"
root_dir: "../data/sample_for_training_preprocessed/sample_preprocessed_scenes"
type: "unseen_instance" # "unseen_category"
datasets:
OmniObject3d_train:
path: "../data/sample_for_training/OmniObject3d_train.txt"
path: "../data/sample_for_training_preprocessed/OmniObject3d_train.txt"
ratio: 0.9
OmniObject3d_test:
path: "../data/sample_for_training/OmniObject3d_test.txt"
path: "../data/sample_for_training_preprocessed/OmniObject3d_test.txt"
ratio: 0.1

View File

@@ -1,19 +1,19 @@
runner:
general:
seed: 1
seed: 0
device: cuda
cuda_visible_devices: "0,1,2,3,4,5,6,7"
parallel: False
experiment:
name: new_test_overfit_to_world
name: new_test_overfit_to_world_preprocessed
root_dir: "experiments"
use_checkpoint: True
use_checkpoint: False
epoch: -1 # -1 stands for last epoch
max_epochs: 5000
save_checkpoint_interval: 3
test_first: False
test_first: True
train:
optimizer:
@@ -31,10 +31,10 @@ runner:
dataset:
OmniObject3d_train:
root_dir: "../data/sample_for_training/scenes"
root_dir: "../data/sample_for_training_preprocessed/sample_preprocessed_scenes"
model_dir: "../data/scaled_object_meshes"
source: nbv_reconstruction_dataset
split_file: "../data/sample_for_training/OmniObject3d_train.txt"
split_file: "../data/sample_for_training_preprocessed/OmniObject3d_train.txt"
type: train
cache: True
ratio: 1
@@ -44,10 +44,10 @@ dataset:
load_from_preprocess: True
OmniObject3d_test:
root_dir: "../data/sample_for_training/scenes"
root_dir: "../data/sample_for_training_preprocessed/sample_preprocessed_scenes"
model_dir: "../data/scaled_object_meshes"
source: nbv_reconstruction_dataset
split_file: "../data/sample_for_training/OmniObject3d_train.txt"
split_file: "../data/sample_for_training_preprocessed/OmniObject3d_train.txt"
type: test
cache: True
filter_degree: 75

View File

@@ -161,28 +161,28 @@ class NBVReconstructionDataset(BaseDataset):
}
if self.type == namespace.Mode.TEST:
diag = DataLoadUtil.get_bbox_diag(self.model_dir, scene_name)
voxel_threshold = diag*0.02
model_points_normals = DataLoadUtil.load_points_normals(self.root_dir, scene_name)
pts_list = []
for view in scanned_views:
frame_idx = view[0]
view_path = DataLoadUtil.get_path(self.root_dir, scene_name, frame_idx)
point_cloud = DataLoadUtil.get_target_point_cloud_world_from_path(view_path, binocular=True)
cam_params = DataLoadUtil.load_cam_info(view_path, binocular=True)
sampled_point_cloud = ReconstructionUtil.filter_points(point_cloud, model_points_normals, cam_pose=cam_params["cam_to_world"], voxel_size=voxel_threshold, theta=self.filter_degree)
pts_list.append(sampled_point_cloud)
nL_to_world_pose = cam_params["cam_to_world"]
nO_to_world_pose = cam_params["cam_to_world_O"]
nO_to_nL_pose = np.dot(np.linalg.inv(nL_to_world_pose), nO_to_world_pose)
data_item["scanned_target_pts_list"] = pts_list
data_item["model_points_normals"] = model_points_normals
data_item["voxel_threshold"] = voxel_threshold
data_item["filter_degree"] = self.filter_degree
data_item["scene_path"] = os.path.join(self.root_dir, scene_name)
data_item["first_frame_to_world"] = np.asarray(first_frame_to_world, dtype=np.float32)
data_item["nO_to_nL_pose"] = np.asarray(nO_to_nL_pose, dtype=np.float32)
# if self.type == namespace.Mode.TEST:
# diag = DataLoadUtil.get_bbox_diag(self.model_dir, scene_name)
# voxel_threshold = diag*0.02
# model_points_normals = DataLoadUtil.load_points_normals(self.root_dir, scene_name)
# pts_list = []
# for view in scanned_views:
# frame_idx = view[0]
# view_path = DataLoadUtil.get_path(self.root_dir, scene_name, frame_idx)
# point_cloud = DataLoadUtil.get_target_point_cloud_world_from_path(view_path, binocular=True)
# cam_params = DataLoadUtil.load_cam_info(view_path, binocular=True)
# sampled_point_cloud = ReconstructionUtil.filter_points(point_cloud, model_points_normals, cam_pose=cam_params["cam_to_world"], voxel_size=voxel_threshold, theta=self.filter_degree)
# pts_list.append(sampled_point_cloud)
# nL_to_world_pose = cam_params["cam_to_world"]
# nO_to_world_pose = cam_params["cam_to_world_O"]
# nO_to_nL_pose = np.dot(np.linalg.inv(nL_to_world_pose), nO_to_world_pose)
# data_item["scanned_target_pts_list"] = pts_list
# data_item["model_points_normals"] = model_points_normals
# data_item["voxel_threshold"] = voxel_threshold
# data_item["filter_degree"] = self.filter_degree
# data_item["scene_path"] = os.path.join(self.root_dir, scene_name)
# data_item["first_frame_to_world"] = np.asarray(first_frame_to_world, dtype=np.float32)
# data_item["nO_to_nL_pose"] = np.asarray(nO_to_nL_pose, dtype=np.float32)
return data_item
def __len__(self):