diff --git a/configs/local/strategy_generate_config.yaml b/configs/local/strategy_generate_config.yaml
index cf444b0..ad489bc 100644
--- a/configs/local/strategy_generate_config.yaml
+++ b/configs/local/strategy_generate_config.yaml
@@ -16,14 +16,14 @@ runner:
     compute_with_normal: False
     scan_points_threshold: 10
     overwrite: False
-    seq_num: 15
+    seq_num: 10
     dataset_list:
       - OmniObject3d
 
 datasets:
   OmniObject3d:
-    root_dir: C:\\Document\\Local Project\\nbv_rec\\nbv_reconstruction\\temp
+    root_dir: H:\\AI\\Datasets\\nbv_rec_part2
     from: 0
-    to: 1 # -1 means end
+    to: 300 # -1 means end
 
 
diff --git a/preprocess/pack_preprocessed_data.py b/preprocess/pack_preprocessed_data.py
new file mode 100644
index 0000000..40fd801
--- /dev/null
+++ b/preprocess/pack_preprocessed_data.py
@@ -0,0 +1,48 @@
+import os
+import shutil
+
+def pack_scene_data(root, scene, output_dir):
+    scene_dir = os.path.join(output_dir, scene)
+    if not os.path.exists(scene_dir):
+        os.makedirs(scene_dir)
+
+    pts_dir = os.path.join(root, scene, "pts")
+    if os.path.exists(pts_dir):
+        shutil.move(pts_dir, os.path.join(scene_dir, "pts"))
+
+    scan_points_indices_dir = os.path.join(root, scene, "scan_points_indices")
+    if os.path.exists(scan_points_indices_dir):
+        shutil.move(scan_points_indices_dir, os.path.join(scene_dir, "scan_points_indices"))
+
+    scan_points_file = os.path.join(root, scene, "scan_points.txt")
+    if os.path.exists(scan_points_file):
+        shutil.move(scan_points_file, os.path.join(scene_dir, "scan_points.txt"))
+
+    model_pts_nrm_file = os.path.join(root, scene, "points_and_normals.txt")
+    if os.path.exists(model_pts_nrm_file):
+        shutil.move(model_pts_nrm_file, os.path.join(scene_dir, "points_and_normals.txt"))
+
+    camera_dir = os.path.join(root, scene, "camera_params")
+    if os.path.exists(camera_dir):
+        shutil.move(camera_dir, os.path.join(scene_dir, "camera_params"))
+
+    scene_info_file = os.path.join(root, scene, "scene_info.json")
+    if os.path.exists(scene_info_file):
+        shutil.move(scene_info_file, os.path.join(scene_dir, "scene_info.json"))
+
+def pack_all_scenes(root, scene_list, output_dir):
+    for idx, scene in enumerate(scene_list):
+        print(f"packing scene {scene} ({idx+1}/{len(scene_list)})")
+        pack_scene_data(root, scene, output_dir)
+
+if __name__ == "__main__":
+    root = r"H:\AI\Datasets\nbv_rec_part2"
+    output_dir = r"H:\AI\Datasets\scene_info_part2"
+    scene_list = os.listdir(root)
+    from_idx = 0
+    to_idx = len(scene_list)
+    print(f"packing scenes {scene_list[from_idx:to_idx]}")
+
+    pack_all_scenes(root, scene_list[from_idx:to_idx], output_dir)
+    print("packing done")
+
diff --git a/preprocess/pack_upload_data.py b/preprocess/pack_upload_data.py
new file mode 100644
index 0000000..2e5de1d
--- /dev/null
+++ b/preprocess/pack_upload_data.py
@@ -0,0 +1,41 @@
+import os
+import shutil
+
+def pack_scene_data(root, scene, output_dir):
+    scene_dir = os.path.join(output_dir, scene)
+    if not os.path.exists(scene_dir):
+        os.makedirs(scene_dir)
+
+    pts_dir = os.path.join(root, scene, "pts")
+    if os.path.exists(pts_dir):
+        shutil.move(pts_dir, os.path.join(scene_dir, "pts"))
+
+    camera_dir = os.path.join(root, scene, "camera_params")
+    if os.path.exists(camera_dir):
+        shutil.move(camera_dir, os.path.join(scene_dir, "camera_params"))
+
+    scene_info_file = os.path.join(root, scene, "scene_info.json")
+    if os.path.exists(scene_info_file):
+        shutil.move(scene_info_file, os.path.join(scene_dir, "scene_info.json"))
+
+    label_dir = os.path.join(root, scene, "label")
+    if os.path.exists(label_dir):
+        shutil.move(label_dir, os.path.join(scene_dir, "label"))
+
+
+def pack_all_scenes(root, scene_list, output_dir):
+    for idx, scene in enumerate(scene_list):
+        print(f"packing {scene} ({idx+1}/{len(scene_list)})")
+        pack_scene_data(root, scene, output_dir)
+
+if __name__ == "__main__":
+    root = r"H:\AI\Datasets\nbv_rec_part2"
+    output_dir = r"H:\AI\Datasets\upload_part2"
+    scene_list = os.listdir(root)
+    from_idx = 0
+    to_idx = len(scene_list)
+    print(f"packing {scene_list[from_idx:to_idx]}")
+
+    pack_all_scenes(root, scene_list[from_idx:to_idx], output_dir)
+    print("packing done")
+
diff --git a/preprocess/preprocessor.py b/preprocess/preprocessor.py
index e4ad251..b1ff29a 100644
--- a/preprocess/preprocessor.py
+++ b/preprocess/preprocessor.py
@@ -164,10 +164,10 @@ def save_scene_data(root, scene, scene_idx=0, scene_total=1,file_type="txt"):
 
 if __name__ == "__main__":
     #root = "/media/hofee/repository/new_data_with_normal"
-    root = r"C:\Document\Datasets\nbv_rec_part2"
+    root = r"H:\AI\Datasets\nbv_rec_part2"
     scene_list = os.listdir(root)
-    from_idx = 600 # 1000
-    to_idx = len(scene_list) # 1500
+    from_idx = 0 # 1000
+    to_idx = 600 # 1500
 
 
     cnt = 0
diff --git a/utils/data_load.py b/utils/data_load.py
index 943ae48..fd4ecaa 100644
--- a/utils/data_load.py
+++ b/utils/data_load.py
@@ -210,6 +210,17 @@ class DataLoadUtil:
         else:
             pts = np.load(npy_path)
         return pts
+
+    @staticmethod
+    def load_from_preprocessed_nrm(path, file_type="npy"):
+        npy_path = os.path.join(
+            os.path.dirname(path), "nrm", os.path.basename(path) + "." + file_type
+        )
+        if file_type == "txt":
+            nrm = np.loadtxt(npy_path)
+        else:
+            nrm = np.load(npy_path)
+        return nrm
 
     @staticmethod
     def cam_pose_transformation(cam_pose_before):
diff --git a/utils/vis.py b/utils/vis.py
index 532a21a..0e2c57e 100644
--- a/utils/vis.py
+++ b/utils/vis.py
@@ -158,18 +158,23 @@ class visualizeUtil:
         np.savetxt(os.path.join(output_dir, "target_normal.txt"), sampled_visualized_normal)
 
     @staticmethod
-    def save_pts_nrm(pts_nrm, output_dir):
-        pts = pts_nrm[:, :3]
-        nrm = pts_nrm[:, 3:]
+    def save_pts_nrm(root, scene, frame_idx, output_dir, binocular=False):
+        path = DataLoadUtil.get_path(root, scene, frame_idx)
+        pts_world = DataLoadUtil.load_from_preprocessed_pts(path, "npy")
+        nrm_camera = DataLoadUtil.load_from_preprocessed_nrm(path, "npy")
+        cam_info = DataLoadUtil.load_cam_info(path, binocular=binocular)
+        cam_to_world = cam_info["cam_to_world"]
+        nrm_world = nrm_camera @ cam_to_world[:3, :3].T
         visualized_nrm = []
         num_samples = 10
-        for i in range(len(pts)):
-            visualized_nrm.append(pts[i] + 0.02*t * nrm[i] for t in range(num_samples))
-        visualized_nrm = np.array(visualized_nrm).reshape(-1, 3)
+        for i in range(len(pts_world)):
+            for t in range(num_samples):
+                visualized_nrm.append(pts_world[i] - 0.02 * t * nrm_world[i])
+
+        visualized_nrm = np.array(visualized_nrm)
         np.savetxt(os.path.join(output_dir, "nrm.txt"), visualized_nrm)
-        np.savetxt(os.path.join(output_dir, "pts.txt"), pts)
-        
-
+        np.savetxt(os.path.join(output_dir, "pts.txt"), pts_world)
+
 
 # ------ Debug ------
 if __name__ == "__main__":
@@ -184,6 +189,4 @@
     # visualizeUtil.save_seq_cam_pos_and_cam_axis(root, scene, [0, 121, 286, 175, 111,366,45,230,232,225,255,17,199,78,60], output_dir)
     # visualizeUtil.save_target_mesh_at_world_space(root, model_dir, scene)
     #visualizeUtil.save_points_and_normals(root, scene,"10", output_dir, binocular=True)
-    pts_nrm = np.loadtxt(r"C:\Document\Local Project\nbv_rec\nbv_reconstruction\pts_nrm_target.txt")
-    visualizeUtil.save_pts_nrm(pts_nrm, output_dir)
-    
+    visualizeUtil.save_pts_nrm(root, scene, "116", output_dir, binocular=True)
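
For reference, a minimal usage sketch of the pieces this patch adds, wired together: DataLoadUtil.load_from_preprocessed_nrm appears to read per-frame normals from a "nrm" folder that sits next to the preprocessed "pts" data, and the reworked visualizeUtil.save_pts_nrm now takes a root/scene/frame instead of a packed pts_nrm array. The import paths, dataset root, output folder, and frame index below are illustrative assumptions, not part of the patch.

# Sketch only: assumes the repo layout utils/data_load.py and utils/vis.py,
# and that preprocessing has already produced pts/ and nrm/ for the scene.
import os
from utils.data_load import DataLoadUtil
from utils.vis import visualizeUtil

root = r"H:\AI\Datasets\nbv_rec_part2"      # placeholder dataset root
scene = os.listdir(root)[0]                 # any preprocessed scene
output_dir = r"H:\AI\Datasets\debug_vis"    # placeholder output folder
os.makedirs(output_dir, exist_ok=True)

# Load the per-frame normals from the sibling "nrm" folder,
# mirroring how load_from_preprocessed_pts resolves the "pts" folder.
path = DataLoadUtil.get_path(root, scene, "116")
nrm_camera = DataLoadUtil.load_from_preprocessed_nrm(path, "npy")
print(nrm_camera.shape)

# Write pts.txt and nrm.txt (world-frame points plus sampled normal offsets).
visualizeUtil.save_pts_nrm(root, scene, "116", output_dir, binocular=True)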