global: inference debug
parent ecd4cfa806
commit 982a3b9b60
@@ -12,7 +12,7 @@ runner:
   test:
     dataset_list:
       - OmniObject3d_train
       - OmniObject3d_test

     blender_script_path: "/data/hofee/project/nbv_rec/blender/data_renderer.py"
     output_dir: "/data/hofee/data/inference_global_full_on_testset"
@@ -22,7 +22,7 @@ runner:
 dataset:
   OmniObject3d_train:
     root_dir: "/data/hofee/data/new_full_data"
-    model_dir: "/data/hofee/data/object_meshes_part3"
+    model_dir: "/data/hofee/data/scaled_object_meshes"
     source: seq_reconstruction_dataset
     split_file: "/data/hofee/data/sample.txt"
     type: test
@@ -35,9 +35,9 @@ dataset:

   OmniObject3d_test:
     root_dir: "/data/hofee/data/new_full_data"
-    model_dir: "/data/hofee/data/object_meshes_part3"
+    model_dir: "/data/hofee/data/scaled_object_meshes"
     source: seq_reconstruction_dataset
-    split_file: "/data/hofee/data/sample.txt"
+    split_file: "/data/hofee/data/new_full_data_list/OmniObject3d_test.txt"
     type: test
     filter_degree: 75
     eval_list:
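For orientation, a minimal sketch of reading one dataset block out of a config shaped like the one above with PyYAML; the file name "config.yaml" and the load_dataset_entry helper are illustrative assumptions, not code from this repository.

    # Minimal sketch (assumed helper, not part of this repo): pull one dataset
    # entry out of a YAML config shaped like the block above.
    import yaml

    def load_dataset_entry(config_path, dataset_name):
        with open(config_path, "r") as f:
            cfg = yaml.safe_load(f)
        # e.g. dataset_name = "OmniObject3d_test" -> {"root_dir": ..., "split_file": ..., ...}
        return cfg["dataset"][dataset_name]

    # Usage sketch:
    # entry = load_dataset_entry("config.yaml", "OmniObject3d_test")
    # print(entry["split_file"])  # /data/hofee/data/new_full_data_list/OmniObject3d_test.txt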
@@ -118,6 +118,7 @@ class SeqReconstructionDataset(BaseDataset):
     def __getitem__(self, index):
         data_item_info = self.datalist[index]
         max_coverage_rate = data_item_info["max_coverage_rate"]
+        best_seq_len = data_item_info["best_seq_len"]
         scene_name = data_item_info["scene_name"]
         (
             scanned_views_pts,
@@ -158,6 +159,7 @@ class SeqReconstructionDataset(BaseDataset):
             "first_scanned_coverage_rate": scanned_coverages_rate, # List(S): Float, range(0, 1)
             "first_scanned_n_to_world_pose_9d": np.asarray(scanned_n_to_world_pose, dtype=np.float32), # Ndarray(S x 9)
             "seq_max_coverage_rate": max_coverage_rate, # Float, range(0, 1)
+            "best_seq_len": best_seq_len, # Int
             "scene_name": scene_name, # String
             "gt_pts": gt_pts, # Ndarray(N x 3)
             "scene_path": os.path.join(self.root_dir, scene_name), # String
@@ -72,7 +72,7 @@ class Inferencer(Runner):
                 data = test_set.__getitem__(i)
                 status_manager.set_progress("inference", "inferencer", f"Batch[{test_set_name}]", i+1, total)
                 output = self.predict_sequence(data)
-                self.save_inference_result(test_set_name, data["scene_name"][0], output)
+                self.save_inference_result(test_set_name, data["scene_name"], output)

             status_manager.set_progress("inference", "inferencer", f"dataset", len(self.test_set_list), len(self.test_set_list))
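A note on the save_inference_result change above: test_set.__getitem__(i) hands back a single, uncollated sample, so data["scene_name"] is already a plain string; indexing it with [0] would only return its first character. A small illustration (the scene name below is made up):

    # Illustration only: raw dataset item vs. a DataLoader-collated batch.
    item = {"scene_name": "omniobject3d_box_001"}      # what __getitem__ returns
    batch = {"scene_name": ["omniobject3d_box_001"]}   # what default_collate would build

    print(item["scene_name"])       # "omniobject3d_box_001" -> correct for raw items
    print(item["scene_name"][0])    # "o"                    -> first character only
    print(batch["scene_name"][0])   # "omniobject3d_box_001" -> correct only for batches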
@@ -107,6 +107,7 @@ class Inferencer(Runner):
         retry_no_pts_pose = []
         retry = 0
         pred_cr_seq = [last_pred_cr]
+        success = 0
         while len(pred_cr_seq) < max_iter and retry < max_retry:

             output = self.pipeline(input_data)
@@ -132,11 +133,13 @@ class Inferencer(Runner):
                 retry += 1
                 continue

             pred_cr, new_added_pts_num = self.compute_coverage_rate(scanned_view_pts, new_target_pts, down_sampled_model_pts, threshold=voxel_threshold)
             print(pred_cr, last_pred_cr, " max: ", data["seq_max_coverage_rate"])
             if pred_cr >= data["seq_max_coverage_rate"] - 1e-3:
                 print("max coverage rate reached!: ", pred_cr)
+                if new_added_pts_num < 10:
+                    success += 1
             elif new_added_pts_num < 10:
                 print("min added pts num reached!: ", new_added_pts_num)
                 if pred_cr <= last_pred_cr + cr_increase_threshold:
                     retry += 1
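The loop above leans on compute_coverage_rate to judge whether a predicted view contributes anything new; its body is not part of this diff. As a rough reference, one common voxel-based formulation (an assumption about the idea, not the repository's exact implementation, which also reports the number of newly added points) looks like this:

    # Hedged sketch: voxel-grid coverage rate for numpy (N x 3) point clouds.
    import numpy as np

    def voxel_coverage_rate(scanned_pts, model_pts, threshold=0.005):
        """Fraction of model voxels that contain at least one scanned point."""
        scanned_vox = set(map(tuple, np.floor(scanned_pts / threshold).astype(np.int64)))
        model_vox = set(map(tuple, np.floor(model_pts / threshold).astype(np.int64)))
        return len(model_vox & scanned_vox) / max(len(model_vox), 1)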
@@ -156,32 +159,29 @@ class Inferencer(Runner):
             random_downsampled_combined_scanned_pts_np = PtsUtil.random_downsample_point_cloud(voxel_downsampled_combined_scanned_pts_np, input_pts_N)
             input_data["combined_scanned_pts"] = torch.tensor(random_downsampled_combined_scanned_pts_np, dtype=torch.float32).unsqueeze(0).to(self.device)

+            if success > 3:
+                break
             last_pred_cr = pred_cr

         input_data["scanned_pts"] = input_data["scanned_pts"][0].cpu().numpy().tolist()
         input_data["scanned_n_to_world_pose_9d"] = input_data["scanned_n_to_world_pose_9d"][0].cpu().numpy().tolist()
         result = {
             "pred_pose_9d_seq": input_data["scanned_n_to_world_pose_9d"],
             "pts_seq": input_data["scanned_pts"],
             "combined_scanned_pts": input_data["combined_scanned_pts"],
             "target_pts_seq": scanned_view_pts,
             "coverage_rate_seq": pred_cr_seq,
-            "max_coverage_rate": data["max_coverage_rate"][0],
+            "max_coverage_rate": data["seq_max_coverage_rate"],
             "pred_max_coverage_rate": max(pred_cr_seq),
             "scene_name": scene_name,
             "retry_no_pts_pose": retry_no_pts_pose,
             "retry_duplication_pose": retry_duplication_pose,
-            "best_seq_len": data["best_seq_len"][0],
+            "best_seq_len": data["best_seq_len"],
         }
         self.stat_result[scene_name] = {
-            "max_coverage_rate": data["max_coverage_rate"][0],
-            "success_rate": max(pred_cr_seq)/ data["max_coverage_rate"][0],
             "coverage_rate_seq": pred_cr_seq,
             "pred_max_coverage_rate": max(pred_cr_seq),
             "pred_seq_len": len(pred_cr_seq),
         }
-        print('success rate: ', max(pred_cr_seq) / data["max_coverage_rate"][0])
+        print('success rate: ', max(pred_cr_seq))

         return result
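Taken together, the success counter added above gives predict_sequence a simple saturation-based stop: a view counts as a success when coverage is within 1e-3 of the sequence maximum and it adds fewer than 10 new points, and the loop ends after more than three successes (or once max_iter/max_retry is exhausted). A condensed, self-contained sketch of that control flow (names and the 1e-3 retry threshold are simplifications; predict_view and coverage stand in for self.pipeline and compute_coverage_rate):

    # Condensed sketch of the stopping logic above, not a line-for-line copy.
    def run_sequence(predict_view, coverage, max_cr, max_iter=50, max_retry=10):
        pred_cr_seq, success, retry, last_cr = [0.0], 0, 0, 0.0
        while len(pred_cr_seq) < max_iter and retry < max_retry:
            cr, new_pts = coverage(predict_view())
            if cr >= max_cr - 1e-3 and new_pts < 10:
                success += 1              # coverage saturated, almost nothing new added
            elif new_pts < 10 and cr <= last_cr + 1e-3:
                retry += 1                # view contributed nothing useful, retry it
                continue
            pred_cr_seq.append(cr)
            if success > 3:               # stop once saturation has been seen repeatedly
                break
            last_cr = cr
        return pred_cr_seq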
@@ -71,7 +71,6 @@ class RenderUtil:
         result = subprocess.run([
             'blender', '-b', '-P', script_path, '--', temp_dir
         ], capture_output=True, text=True)

         if result.returncode != 0:
             print("Blender script failed:")
             print(result.stderr)