debug strategy_generator
parent 38f7f8df18
commit fd96b97d7b
@@ -11,16 +11,20 @@ runner:
   root_dir: "experiments"
 
   generate:
-    voxel_threshold: 0.005
+    voxel_threshold: 0.01
     overlap_threshold: 0.5
+    filter_degree: 75
     to_specified_dir: True # if True, output_dir is used, otherwise, root_dir is used
-    save_points: True
+    save_points: False
+    save_best_combined_points: True
+    save_mesh: True
     dataset_list:
       - OmniObject3d
 
 datasets:
   OmniObject3d:
-    root_dir: "/media/hofee/data/data/temp_output"
-    output_dir: "/media/hofee/data/data/label_output"
+    root_dir: "/media/hofee/data/project/python/nbv_reconstruction/nbv_rec_visualize/data/sample"
+    model_dir: "/media/hofee/data/data/scaled_object_meshes"
+    #output_dir: "/media/hofee/data/data/label_output"
 
 
@@ -12,7 +12,10 @@ runner:
   output_dir: /media/hofee/data/data/temp_output
   binocular_vision: true
   plane_size: 10
-  max_views: 100
+  max_views: 256
+  min_views: 64
+  max_diag: 0.7
+  min_diag: 0.1
   random_config:
     display_table:
       min_height: 0.05
@@ -23,6 +23,10 @@ class StrategyGenerator(Runner):
             "runner_name": "strategy_generator"
         }
         self.to_specified_dir = ConfigManager.get("runner", "generate", "to_specified_dir")
+        self.save_best_combined_pts = ConfigManager.get("runner", "generate", "save_best_combined_points")
+        self.save_mesh = ConfigManager.get("runner", "generate", "save_mesh")
+        self.filter_degree = ConfigManager.get("runner", "generate", "filter_degree")
+
 
 
     def run(self):
@@ -33,13 +37,14 @@ class StrategyGenerator(Runner):
             dataset_name = dataset_name_list[dataset_idx]
             status_manager.set_progress("generate", "strategy_generator", "dataset", dataset_idx, len(dataset_name_list))
             root_dir = ConfigManager.get("datasets", dataset_name, "root_dir")
-            scene_name_list = os.listdir(root_dir)[:10]
+            model_dir = ConfigManager.get("datasets", dataset_name, "model_dir")
+            scene_name_list = os.listdir(root_dir)
             cnt = 0
             total = len(scene_name_list)
             for scene_name in scene_name_list:
                 Log.info(f"({dataset_name})Processing [{cnt}/{total}]: {scene_name}")
                 status_manager.set_progress("generate", "strategy_generator", "scene", cnt, total)
-                self.generate_sequence(root_dir, dataset_name, scene_name,voxel_threshold, overlap_threshold, )
+                self.generate_sequence(root_dir, model_dir, scene_name,voxel_threshold, overlap_threshold)
                 cnt += 1
             status_manager.set_progress("generate", "strategy_generator", "scene", total, total)
         status_manager.set_progress("generate", "strategy_generator", "dataset", len(dataset_name_list), len(dataset_name_list))
@@ -52,7 +57,7 @@ class StrategyGenerator(Runner):
     def load_experiment(self, backup_name=None):
         super().load_experiment(backup_name)
 
-    def generate_sequence(self, root, dataset_name, scene_name, voxel_threshold, overlap_threshold):
+    def generate_sequence(self, root, model_dir, scene_name, voxel_threshold, overlap_threshold):
         status_manager.set_status("generate", "strategy_generator", "scene", scene_name)
         frame_num = DataLoadUtil.get_scene_seq_length(root, scene_name)
         model_points_normals = DataLoadUtil.load_points_normals(root, scene_name)
@@ -66,7 +71,9 @@ class StrategyGenerator(Runner):
             cam_params = DataLoadUtil.load_cam_info(path, binocular=True)
             status_manager.set_progress("generate", "strategy_generator", "loading frame", frame_idx, frame_num)
             point_cloud = DataLoadUtil.get_target_point_cloud_world_from_path(path, binocular=True)
-            sampled_point_cloud = ReconstructionUtil.filter_points(point_cloud, model_points_normals, cam_pose=cam_params["cam_to_world"], voxel_size=voxel_threshold, theta=45)
+            #display_table = None #DataLoadUtil.get_target_point_cloud_world_from_path(path, binocular=True, target_mask_label=()) #TODO
+            sampled_point_cloud = ReconstructionUtil.filter_points(point_cloud, model_points_normals, cam_pose=cam_params["cam_to_world"], voxel_size=voxel_threshold, theta=self.filter_degree)
+
             if self.save_pts:
                 pts_dir = os.path.join(root,scene_name, "pts")
                 if not os.path.exists(pts_dir):
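The frame loop now filters each frame's cloud with the configurable filter_degree instead of a hard-coded 45 degrees. ReconstructionUtil.filter_points is only visible by its signature in this diff (points, points_normals, cam_pose, voxel_size, theta), so the following is a hedged sketch of the kind of normal-versus-view-direction filtering such a function typically performs, not the repository's implementation; the name filter_points_sketch and the per-voxel subsampling step are assumptions.

    import numpy as np
    from scipy.spatial import cKDTree

    def filter_points_sketch(points, points_normals, cam_pose, voxel_size=0.005, theta=75):
        # points_normals is assumed to be (N, 6): xyz followed by the surface normal.
        model_pts, model_normals = points_normals[:, :3], points_normals[:, 3:]
        # Borrow the normal of the nearest model point for every observed point.
        _, idx = cKDTree(model_pts).query(points)
        normals = model_normals[idx]
        normals /= np.linalg.norm(normals, axis=1, keepdims=True)
        # Direction from each point towards the camera center (translation of cam_to_world).
        view_dirs = cam_pose[:3, 3] - points
        view_dirs /= np.linalg.norm(view_dirs, axis=1, keepdims=True)
        # Keep points whose normal is within `theta` degrees of the viewing direction.
        keep = np.sum(view_dirs * normals, axis=1) > np.cos(np.radians(theta))
        kept = points[keep]
        # The caller treats the result as a sampled cloud, so also keep one point per voxel.
        _, first_idx = np.unique(np.floor(kept / voxel_size).astype(np.int64), axis=0, return_index=True)
        return kept[first_idx]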
@@ -75,7 +82,7 @@ class StrategyGenerator(Runner):
             pts_list.append(sampled_point_cloud)
         status_manager.set_progress("generate", "strategy_generator", "loading frame", frame_num, frame_num)
 
-        limited_useful_view, _ = ReconstructionUtil.compute_next_best_view_sequence_with_overlap(down_sampled_model_pts, pts_list, threshold=voxel_threshold, overlap_threshold=overlap_threshold, status_info=self.status_info)
+        limited_useful_view, _, best_combined_pts = ReconstructionUtil.compute_next_best_view_sequence_with_overlap(down_sampled_model_pts, pts_list, threshold=voxel_threshold, overlap_threshold=overlap_threshold, status_info=self.status_info)
         data_pairs = self.generate_data_pairs(limited_useful_view)
         seq_save_data = {
             "data_pairs": data_pairs,
@@ -85,17 +92,19 @@ class StrategyGenerator(Runner):
 
         status_manager.set_status("generate", "strategy_generator", "max_coverage_rate", limited_useful_view[-1][1])
         Log.success(f"Scene <{scene_name}> Finished, Max Coverage Rate: {limited_useful_view[-1][1]}, Best Sequence length: {len(limited_useful_view)}")
-        if self.to_specified_dir:
-            output_dir = ConfigManager.get("datasets", dataset_name,"output_dir")
-            output_label_path = os.path.join(output_dir, f"{scene_name}.json")
-            if not os.path.exists(output_dir):
-                os.makedirs(output_dir)
-        else:
-            output_label_path = DataLoadUtil.get_label_path(root, scene_name)
+        output_label_path = DataLoadUtil.get_label_path(root, scene_name)
+        output_best_reconstructed_pts_path = os.path.join(root,scene_name, f"best_reconstructed_pts.txt")
 
         with open(output_label_path, 'w') as f:
             json.dump(seq_save_data, f)
 
+        if self.save_best_combined_pts:
+            np.savetxt(output_best_reconstructed_pts_path, best_combined_pts)
+
+        if self.save_mesh:
+            DataLoadUtil.save_target_mesh_at_world_space(root, model_dir, scene_name)
+
         DataLoadUtil.save_downsampled_world_model_points(root, scene_name, down_sampled_model_pts)
 
     def generate_data_pairs(self, useful_view):
@@ -3,6 +3,7 @@ import numpy as np
 import json
 import cv2
 import trimesh
+from utils.pts import PtsUtil
 
 class DataLoadUtil:
 
@@ -38,10 +39,34 @@ class DataLoadUtil:
         np.savetxt(model_path, model_points)
 
     @staticmethod
-    def load_original_model_points(model_dir, object_name):
+    def load_mesh_at(model_dir, object_name, world_object_pose):
         model_path = os.path.join(model_dir, object_name, "mesh.obj")
         mesh = trimesh.load(model_path)
-        return mesh.vertices
+        mesh.apply_transform(world_object_pose)
+        return mesh
+
+    @staticmethod
+    def save_mesh_at(model_dir, output_dir, object_name, scene_name, world_object_pose):
+        mesh = DataLoadUtil.load_mesh_at(model_dir, object_name, world_object_pose)
+        model_path = os.path.join(output_dir, scene_name, "world_mesh.obj")
+        mesh.export(model_path)
+
+    @staticmethod
+    def save_target_mesh_at_world_space(root, model_dir, scene_name):
+        scene_info = DataLoadUtil.load_scene_info(root, scene_name)
+        target_name = scene_info["target_name"]
+        transformation = scene_info[target_name]
+        location = transformation["location"]
+        rotation_euler = transformation["rotation_euler"]
+        pose_mat = trimesh.transformations.euler_matrix(*rotation_euler)
+        pose_mat[:3, 3] = location
+
+        mesh = DataLoadUtil.load_mesh_at(model_dir, target_name, pose_mat)
+        mesh_dir = os.path.join(root, scene_name, "mesh")
+        if not os.path.exists(mesh_dir):
+            os.makedirs(mesh_dir)
+        model_path = os.path.join(mesh_dir, "world_target_mesh.obj")
+        mesh.export(model_path)
 
     @staticmethod
     def load_scene_info(root, scene_name):
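The new save_target_mesh_at_world_space follows a standard trimesh pattern: build a 4x4 pose from the scene's stored euler rotation and location, bake it into the mesh, and export the result. A self-contained sketch of that pattern follows; the box mesh, angles, and output path are placeholders, and the euler angles are assumed to be in radians, as trimesh.transformations.euler_matrix expects.

    import numpy as np
    import trimesh

    # Placeholder pose; in the runner these come from scene_info[target_name].
    rotation_euler = (0.0, 0.0, np.pi / 4)
    location = (0.1, 0.0, 0.05)

    pose_mat = trimesh.transformations.euler_matrix(*rotation_euler)  # 4x4, rotation only
    pose_mat[:3, 3] = location                                        # add the translation

    mesh = trimesh.creation.box(extents=(0.1, 0.1, 0.1))  # stand-in for the loaded mesh.obj
    mesh.apply_transform(pose_mat)                         # object frame -> world frame
    mesh.export("world_target_mesh.obj")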
@@ -169,14 +194,15 @@ class DataLoadUtil:
         }
 
     @staticmethod
-    def get_target_point_cloud_world_from_path(path, binocular=False):
+    def get_target_point_cloud_world_from_path(path, binocular=False, random_downsample_N=65536, voxel_size = 0.005, target_mask_label=(0,255,0,255)):
         cam_info = DataLoadUtil.load_cam_info(path, binocular=binocular)
         if binocular:
-            voxel_size = 0.0005
             depth_L, depth_R = DataLoadUtil.load_depth(path, cam_info['near_plane'], cam_info['far_plane'], binocular=True)
             mask_L, mask_R = DataLoadUtil.load_seg(path, binocular=True)
-            point_cloud_L = DataLoadUtil.get_target_point_cloud(depth_L, cam_info['cam_intrinsic'], cam_info['cam_to_world'], mask_L)['points_world']
-            point_cloud_R = DataLoadUtil.get_target_point_cloud(depth_R, cam_info['cam_intrinsic'], cam_info['cam_to_world_R'], mask_R)['points_world']
+            point_cloud_L = DataLoadUtil.get_target_point_cloud(depth_L, cam_info['cam_intrinsic'], cam_info['cam_to_world'], mask_L, target_mask_label)['points_world']
+            point_cloud_R = DataLoadUtil.get_target_point_cloud(depth_R, cam_info['cam_intrinsic'], cam_info['cam_to_world_R'], mask_R, target_mask_label)['points_world']
+            point_cloud_L = PtsUtil.random_downsample_point_cloud(point_cloud_L, random_downsample_N)
+            point_cloud_R = PtsUtil.random_downsample_point_cloud(point_cloud_R, random_downsample_N)
             overlap_points = DataLoadUtil.get_overlapping_points(point_cloud_L, point_cloud_R, voxel_size)
             return overlap_points
         else:
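Both stereo clouds are now randomly downsampled to the same budget (random_downsample_N) before the overlap test, and the voxel size becomes a keyword argument instead of the hard-coded 0.0005. get_overlapping_points itself is not part of this diff; a plausible voxel-grid version of it (an assumption, not the repository's code) keeps the left-camera points whose voxel is also occupied by a right-camera point, which is why feeding it equally sized clouds is convenient.

    import numpy as np

    def get_overlapping_points_sketch(point_cloud_L, point_cloud_R, voxel_size=0.005):
        # Quantize both clouds onto the same voxel grid.
        keys_L = np.floor(point_cloud_L / voxel_size).astype(np.int64)
        keys_R = np.floor(point_cloud_R / voxel_size).astype(np.int64)
        # Keep a left point only if its voxel is also hit from the right camera.
        occupied_R = {tuple(k) for k in keys_R}
        mask = np.fromiter((tuple(k) in occupied_R for k in keys_L), dtype=bool, count=len(keys_L))
        return point_cloud_L[mask]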
@@ -185,6 +211,7 @@ class DataLoadUtil:
             point_cloud = DataLoadUtil.get_target_point_cloud(depth, cam_info['cam_intrinsic'], cam_info['cam_to_world'], mask)['points_world']
             return point_cloud
 
+
     @staticmethod
     def voxelize_points(points, voxel_size):
 
@@ -5,6 +5,7 @@ class PtsUtil:
 
     @staticmethod
     def voxel_downsample_point_cloud(point_cloud, voxel_size=0.005):
+        print("voxel_size: ", voxel_size)
         o3d_pc = o3d.geometry.PointCloud()
         o3d_pc.points = o3d.utility.Vector3dVector(point_cloud)
         downsampled_pc = o3d_pc.voxel_down_sample(voxel_size)
@@ -18,5 +19,5 @@ class PtsUtil:
 
     @staticmethod
     def random_downsample_point_cloud(point_cloud, num_points):
-        idx = np.random.choice(len(point_cloud), num_points, replace=False)
+        idx = np.random.choice(len(point_cloud), num_points, replace=True)
         return point_cloud[idx]
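The switch to replace=True is what keeps this call from raising: np.random.choice with replace=False fails with a ValueError whenever the cloud has fewer points than num_points, which can happen for sparsely observed frames, while sampling with replacement always returns exactly num_points at the cost of duplicated points. If duplicates ever matter, a guarded variant (a sketch, not the repository's code) would only fall back to replacement when necessary:

    import numpy as np

    def random_downsample_point_cloud(point_cloud, num_points):
        # Sample without replacement when the cloud is large enough,
        # otherwise allow duplicates so the call never raises.
        replace = len(point_cloud) < num_points
        idx = np.random.choice(len(point_cloud), num_points, replace=replace)
        return point_cloud[idx]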
@@ -6,6 +6,7 @@ class ReconstructionUtil:
 
     @staticmethod
     def compute_coverage_rate(target_point_cloud, combined_point_cloud, threshold=0.01):
+        print("threshold", threshold)
         kdtree = cKDTree(combined_point_cloud)
         distances, _ = kdtree.query(target_point_cloud)
         covered_points = np.sum(distances < threshold)
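The debug print exposes the threshold actually used by the coverage test. The coverage rate itself is the fraction of target (model) points that have a reconstructed neighbor within threshold; the final ratio line falls outside this hunk, so the self-contained version below is partly inferred from the lines that are shown.

    import numpy as np
    from scipy.spatial import cKDTree

    def compute_coverage_rate(target_point_cloud, combined_point_cloud, threshold=0.01):
        # Distance from every target point to its nearest reconstructed point.
        kdtree = cKDTree(combined_point_cloud)
        distances, _ = kdtree.query(target_point_cloud)
        # A target point counts as covered if something was reconstructed close to it.
        covered_points = np.sum(distances < threshold)
        return covered_points / len(target_point_cloud)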
@@ -45,7 +46,7 @@ class ReconstructionUtil:
 
 
     @staticmethod
-    def compute_next_best_view_sequence_with_overlap(target_point_cloud, point_cloud_list, threshold=0.01, overlap_threshold=0.3, status_info=None):
+    def compute_next_best_view_sequence_with_overlap(target_point_cloud, point_cloud_list, display_table_point_cloud_list = None,threshold=0.01, overlap_threshold=0.3, status_info=None):
         selected_views = []
         current_coverage = 0.0
         remaining_views = list(range(len(point_cloud_list)))
@@ -74,22 +75,21 @@ class ReconstructionUtil:
                 if coverage_increase > best_coverage_increase:
                     best_coverage_increase = coverage_increase
                     best_view = view_index
-                cnt_processed_view += 1
-                if status_info is not None:
 
-                    sm = status_info["status_manager"]
-                    app_name = status_info["app_name"]
-                    runner_name = status_info["runner_name"]
-                    sm.set_status(app_name, runner_name, "current coverage", current_coverage)
-                    sm.set_progress(app_name, runner_name, "processed view", cnt_processed_view, len(point_cloud_list))
 
             if best_view is not None:
                 if best_coverage_increase <=1e-3:
                     break
                 selected_views.append(point_cloud_list[best_view])
                 remaining_views.remove(best_view)
-                if best_coverage_increase > 0:
-                    current_coverage += best_coverage_increase
+                current_coverage += best_coverage_increase
+                cnt_processed_view += 1
+                if status_info is not None:
+                    sm = status_info["status_manager"]
+                    app_name = status_info["app_name"]
+                    runner_name = status_info["runner_name"]
+                    sm.set_status(app_name, runner_name, "current coverage", current_coverage)
+                    sm.set_progress(app_name, runner_name, "processed view", cnt_processed_view, len(point_cloud_list))
 
                 view_sequence.append((best_view, current_coverage))
 
@@ -100,7 +100,7 @@ class ReconstructionUtil:
             app_name = status_info["app_name"]
             runner_name = status_info["runner_name"]
             sm.set_progress(app_name, runner_name, "processed view", len(point_cloud_list), len(point_cloud_list))
-        return view_sequence, remaining_views
+        return view_sequence, remaining_views, down_sampled_combined_point_cloud
 
     @staticmethod
     def filter_points(points, points_normals, cam_pose, voxel_size=0.005, theta=45):
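Taken together, the last two hunks keep the greedy structure of compute_next_best_view_sequence_with_overlap, but the coverage and progress bookkeeping now runs once per selected view instead of once per candidate, and the function additionally returns the combined down-sampled cloud of the selected views. A compressed outline of that loop follows, with the overlap check, down-sampling, and status reporting omitted; it reuses the compute_coverage_rate shown above and is a sketch, not the full method.

    import numpy as np

    def greedy_view_sequence_sketch(target_pts, view_pts_list, threshold=0.01):
        view_sequence, selected_views = [], []
        remaining_views = list(range(len(view_pts_list)))
        current_coverage, combined = 0.0, np.empty((0, 3))
        while remaining_views:
            best_view, best_increase = None, 0.0
            for view_index in remaining_views:
                candidate = np.vstack([combined, view_pts_list[view_index]])
                increase = compute_coverage_rate(target_pts, candidate, threshold) - current_coverage
                if increase > best_increase:
                    best_increase, best_view = increase, view_index
            # Stop when no remaining view adds a meaningful amount of coverage.
            if best_view is None or best_increase <= 1e-3:
                break
            selected_views.append(view_pts_list[best_view])
            remaining_views.remove(best_view)
            combined = np.vstack([combined, view_pts_list[best_view]])
            current_coverage += best_increase
            view_sequence.append((best_view, current_coverage))
        return view_sequence, remaining_views, combined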