From 6cdff9c83f3cf22150fb345ac92084bcec001099 Mon Sep 17 00:00:00 2001
From: hofee
Date: Fri, 20 Sep 2024 11:49:32 +0000
Subject: [PATCH] change config and remove online evaluation

---
 app_split.py                             |  2 +-
 configs/server/split_dataset_config.yaml |  6 ++--
 configs/server/train_config.yaml         | 16 ++++-----
 core/nbv_dataset.py                      | 44 ++++++++++++------------
 4 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/app_split.py b/app_split.py
index 35e803b..c9d0054 100644
--- a/app_split.py
+++ b/app_split.py
@@ -5,5 +5,5 @@
 from runners.data_spliter import DataSpliter
 class DataSplitApp:
     @staticmethod
     def start():
-        DataSpliter("configs/split_dataset_config.yaml").run()
+        DataSpliter("configs/server/split_dataset_config.yaml").run()
\ No newline at end of file
diff --git a/configs/server/split_dataset_config.yaml b/configs/server/split_dataset_config.yaml
index 774d37d..0812e45 100644
--- a/configs/server/split_dataset_config.yaml
+++ b/configs/server/split_dataset_config.yaml
@@ -10,13 +10,13 @@ runner:
     root_dir: "experiments"
 
   split:
-    root_dir: "../data/sample_for_training/scenes"
+    root_dir: "../data/sample_for_training_preprocessed/sample_preprocessed_scenes"
     type: "unseen_instance" # "unseen_category"
 
     datasets:
      OmniObject3d_train:
-       path: "../data/sample_for_training/OmniObject3d_train.txt"
+       path: "../data/sample_for_training_preprocessed/OmniObject3d_train.txt"
        ratio: 0.9
      OmniObject3d_test:
-       path: "../data/sample_for_training/OmniObject3d_test.txt"
+       path: "../data/sample_for_training_preprocessed/OmniObject3d_test.txt"
        ratio: 0.1
\ No newline at end of file
diff --git a/configs/server/train_config.yaml b/configs/server/train_config.yaml
index 13aa48e..45d84cf 100644
--- a/configs/server/train_config.yaml
+++ b/configs/server/train_config.yaml
@@ -1,19 +1,19 @@
 runner:
   general:
-    seed: 1
+    seed: 0
     device: cuda
     cuda_visible_devices: "0,1,2,3,4,5,6,7"
     parallel: False
 
   experiment:
-    name: new_test_overfit_to_world
+    name: new_test_overfit_to_world_preprocessed
     root_dir: "experiments"
-    use_checkpoint: True
+    use_checkpoint: False
     epoch: -1 # -1 stands for last epoch
     max_epochs: 5000
     save_checkpoint_interval: 3
-    test_first: False
+    test_first: True
 
   train:
     optimizer:
@@ -31,10 +31,10 @@ runner:
 
 dataset:
   OmniObject3d_train:
-    root_dir: "../data/sample_for_training/scenes"
+    root_dir: "../data/sample_for_training_preprocessed/sample_preprocessed_scenes"
     model_dir: "../data/scaled_object_meshes"
     source: nbv_reconstruction_dataset
-    split_file: "../data/sample_for_training/OmniObject3d_train.txt"
+    split_file: "../data/sample_for_training_preprocessed/OmniObject3d_train.txt"
     type: train
     cache: True
     ratio: 1
@@ -44,10 +44,10 @@ dataset:
     load_from_preprocess: True
 
   OmniObject3d_test:
-    root_dir: "../data/sample_for_training/scenes"
+    root_dir: "../data/sample_for_training_preprocessed/sample_preprocessed_scenes"
     model_dir: "../data/scaled_object_meshes"
     source: nbv_reconstruction_dataset
-    split_file: "../data/sample_for_training/OmniObject3d_train.txt"
+    split_file: "../data/sample_for_training_preprocessed/OmniObject3d_train.txt"
     type: test
     cache: True
     filter_degree: 75
diff --git a/core/nbv_dataset.py b/core/nbv_dataset.py
index 72bd5db..634a745 100644
--- a/core/nbv_dataset.py
+++ b/core/nbv_dataset.py
@@ -161,28 +161,28 @@ class NBVReconstructionDataset(BaseDataset):
         }
 
-        if self.type == namespace.Mode.TEST:
-            diag = DataLoadUtil.get_bbox_diag(self.model_dir, scene_name)
-            voxel_threshold = diag*0.02
-            model_points_normals = DataLoadUtil.load_points_normals(self.root_dir, scene_name)
-            pts_list = []
-            for view in scanned_views:
-                frame_idx = view[0]
-                view_path = DataLoadUtil.get_path(self.root_dir, scene_name, frame_idx)
-                point_cloud = DataLoadUtil.get_target_point_cloud_world_from_path(view_path, binocular=True)
-                cam_params = DataLoadUtil.load_cam_info(view_path, binocular=True)
-                sampled_point_cloud = ReconstructionUtil.filter_points(point_cloud, model_points_normals, cam_pose=cam_params["cam_to_world"], voxel_size=voxel_threshold, theta=self.filter_degree)
-                pts_list.append(sampled_point_cloud)
-            nL_to_world_pose = cam_params["cam_to_world"]
-            nO_to_world_pose = cam_params["cam_to_world_O"]
-            nO_to_nL_pose = np.dot(np.linalg.inv(nL_to_world_pose), nO_to_world_pose)
-            data_item["scanned_target_pts_list"] = pts_list
-            data_item["model_points_normals"] = model_points_normals
-            data_item["voxel_threshold"] = voxel_threshold
-            data_item["filter_degree"] = self.filter_degree
-            data_item["scene_path"] = os.path.join(self.root_dir, scene_name)
-            data_item["first_frame_to_world"] = np.asarray(first_frame_to_world, dtype=np.float32)
-            data_item["nO_to_nL_pose"] = np.asarray(nO_to_nL_pose, dtype=np.float32)
+        # if self.type == namespace.Mode.TEST:
+        #     diag = DataLoadUtil.get_bbox_diag(self.model_dir, scene_name)
+        #     voxel_threshold = diag*0.02
+        #     model_points_normals = DataLoadUtil.load_points_normals(self.root_dir, scene_name)
+        #     pts_list = []
+        #     for view in scanned_views:
+        #         frame_idx = view[0]
+        #         view_path = DataLoadUtil.get_path(self.root_dir, scene_name, frame_idx)
+        #         point_cloud = DataLoadUtil.get_target_point_cloud_world_from_path(view_path, binocular=True)
+        #         cam_params = DataLoadUtil.load_cam_info(view_path, binocular=True)
+        #         sampled_point_cloud = ReconstructionUtil.filter_points(point_cloud, model_points_normals, cam_pose=cam_params["cam_to_world"], voxel_size=voxel_threshold, theta=self.filter_degree)
+        #         pts_list.append(sampled_point_cloud)
+        #     nL_to_world_pose = cam_params["cam_to_world"]
+        #     nO_to_world_pose = cam_params["cam_to_world_O"]
+        #     nO_to_nL_pose = np.dot(np.linalg.inv(nL_to_world_pose), nO_to_world_pose)
+        #     data_item["scanned_target_pts_list"] = pts_list
+        #     data_item["model_points_normals"] = model_points_normals
+        #     data_item["voxel_threshold"] = voxel_threshold
+        #     data_item["filter_degree"] = self.filter_degree
+        #     data_item["scene_path"] = os.path.join(self.root_dir, scene_name)
+        #     data_item["first_frame_to_world"] = np.asarray(first_frame_to_world, dtype=np.float32)
+        #     data_item["nO_to_nL_pose"] = np.asarray(nO_to_nL_pose, dtype=np.float32)
 
         return data_item
 
     def __len__(self):