---
# Configuration for next-best-view (NBV) reconstruction testing.
# Ablation run: global-feature-only pipeline, clustering disabled.
# NOTE(review): this file was recovered from a whitespace-mangled copy;
# nesting below is reconstructed from key semantics — confirm against the loader.

runner:
  general:
    seed: 0
    device: cuda
    cuda_visible_devices: "0,1,2,3,4,5,6,7"

  experiment:
    name: ab_global_only
    root_dir: "experiments"
    epoch: 200  # -1 stands for last epoch

  test:
    dataset_list:
      - OmniObject3d_test

    blender_script_path: "/media/hofee/data/project/python/nbv_reconstruction/blender/data_renderer.py"
    output_dir: "/media/hofee/data/project/exp/new_no_cluster_ab_global_only"
    # Selects one of the variants declared under `pipeline:` below.
    pipeline: nbv_reconstruction_pipeline_global_only
    voxel_size: 0.003
    min_new_area: 1.0
    overlap_limit: True
    enable_cluster: False

dataset:
  OmniObject3d_test:
    root_dir: "/media/hofee/repository/final_test_set/preprocessed_dataset"
    model_dir: "/media/hofee/data/data/target/target_formulated_view"
    source: seq_reconstruction_dataset_preprocessed
    type: test
    filter_degree: 75
    eval_list:
      - pose_diff
      - coverage_rate_increase
    ratio: 0.1
    batch_size: 1
    num_workers: 12
    pts_num: 8192
    load_from_preprocess: True

# Pipeline variants; runner.test.pipeline picks which one is instantiated.
# Module names under `modules:` refer to entries in the `module:` section below.
# NOTE(review): eps / global_scanned_feat placed as siblings of `modules` —
# confirm nesting against the pipeline constructor.
pipeline:
  nbv_reconstruction_pipeline_local:
    modules:
      pts_encoder: pointnet++_encoder
      seq_encoder: transformer_seq_encoder
      pose_encoder: pose_encoder
      view_finder: gf_view_finder
    eps: 1e-5
    global_scanned_feat: True

  nbv_reconstruction_pipeline_global:
    modules:
      pts_encoder: pointnet++_encoder
      seq_encoder: transformer_seq_encoder
      pose_encoder: pose_encoder
      view_finder: gf_view_finder
    eps: 1e-5
    global_scanned_feat: True

  nbv_reconstruction_pipeline_local_only:
    modules:
      pts_encoder: pointnet++_encoder
      seq_encoder: transformer_seq_encoder
      pose_encoder: pose_encoder
      view_finder: gf_view_finder
    eps: 1e-5
    global_scanned_feat: True

  nbv_reconstruction_pipeline_global_only:
    modules:
      pts_encoder: pointnet++_encoder
      seq_encoder: transformer_seq_encoder
      pose_encoder: pose_encoder
      view_finder: gf_view_finder
    eps: 1e-5
    global_scanned_feat: True

  nbv_reconstruction_pipeline_mlp:
    modules:
      pts_encoder: pointnet++_encoder
      seq_encoder: transformer_seq_encoder
      pose_encoder: pose_encoder
      view_finder: mlp_view_finder  # MLP head instead of gf_view_finder
    eps: 1e-5
    global_scanned_feat: True

# Per-module hyperparameters, keyed by the names referenced in `pipeline:`.
module:
  pointnet_encoder:
    in_dim: 3
    out_dim: 1024
    global_feat: True
    feature_transform: False

  pointnet++_encoder:
    in_dim: 3

  transformer_seq_encoder:
    embed_dim: 256
    num_heads: 4
    ffn_dim: 256
    num_layers: 3
    output_dim: 1024

  gf_view_finder:
    t_feat_dim: 128
    pose_feat_dim: 256
    main_feat_dim: 1024
    regression_head: Rx_Ry_and_T
    pose_mode: rot_matrix
    per_point_feature: False
    sample_mode: ode
    sampling_steps: 500
    sde_mode: ve

  pose_encoder:
    pose_dim: 9
    out_dim: 256

  pts_num_encoder:
    out_dim: 64