Change DataLoadUtil and Dataset to the Blender version
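The dataset now enumerates scenes by name (via os.listdir on root_dir) instead of a numeric scene index, matching the Blender-generated data layout, and converts each cam_to_world matrix into a 9D pose (6D rotation + 3D translation). The pose_diff evaluator compares predicted and ground-truth 9D poses, and the reconstruction pipeline perturbs and predicts the full 9D pose (pred_pose_9d) instead of a delta rotation.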
@@ -1,10 +1,15 @@
 import os
 import numpy as np
 from PytorchBoot.dataset import BaseDataset
 import PytorchBoot.stereotype as stereotype
 
+import sys
+sys.path.append(r"C:\Document\Local Project\nbv_rec\nbv_reconstruction")
+
 from utils.data_load import DataLoadUtil
+from utils.pose import PoseUtil
 
 
-@stereotype.dataset("nbv_reconstruction_dataset")
+@stereotype.dataset("nbv_reconstruction_dataset", comment="to be modified")
 class NBVReconstructionDataset(BaseDataset):
     def __init__(self, config):
         super(NBVReconstructionDataset, self).__init__(config)
@@ -15,9 +20,9 @@ class NBVReconstructionDataset(BaseDataset):
 
     def get_datalist(self):
         datalist = []
-        scene_idx_list = DataLoadUtil.get_scene_idx_list(self.root_dir)
-        for scene_idx in scene_idx_list:
-            label_path = DataLoadUtil.get_label_path(self.label_dir, scene_idx)
+        scene_name_list = os.listdir(self.root_dir)
+        for scene_name in scene_name_list:
+            label_path = DataLoadUtil.get_label_path(self.label_dir, scene_name)
             label_data = DataLoadUtil.load_label(label_path)
             for data_pair in label_data["data_pairs"]:
                 scanned_views = data_pair[0]
@@ -28,7 +33,7 @@ class NBVReconstructionDataset(BaseDataset):
                         "scanned_views": scanned_views,
                         "next_best_view": next_best_view,
                         "max_coverage_rate": max_coverage_rate,
-                        "scene_idx": scene_idx,
+                        "scene_name": scene_name,
                     }
                 )
         return datalist
@@ -38,32 +43,39 @@ class NBVReconstructionDataset(BaseDataset):
         scanned_views = data_item_info["scanned_views"]
         nbv = data_item_info["next_best_view"]
         max_coverage_rate = data_item_info["max_coverage_rate"]
-        scene_idx = data_item_info["scene_idx"]
+        scene_name = data_item_info["scene_name"]
         scanned_views_pts, scanned_coverages_rate, scanned_cam_pose = [], [], []
         for view in scanned_views:
             frame_idx = view[0]
             coverage_rate = view[1]
-            view_path = DataLoadUtil.get_path(self.root_dir, scene_idx, frame_idx)
+            view_path = DataLoadUtil.get_path(self.root_dir, scene_name, frame_idx)
             pts = DataLoadUtil.load_depth(view_path)
             scanned_views_pts.append(pts)
             scanned_coverages_rate.append(coverage_rate)
             cam_pose = DataLoadUtil.load_cam_info(view_path)["cam_to_world"]
-            scanned_cam_pose.append(cam_pose)
+
+            cam_pose_6d = PoseUtil.matrix_to_rotation_6d_numpy(np.asarray(cam_pose[:3,:3]))
+            translation = cam_pose[:3,3]
+            cam_pose_9d = np.concatenate([cam_pose_6d, translation], axis=0)
+            scanned_cam_pose.append(cam_pose_9d)
 
         nbv_idx, nbv_coverage_rate = nbv[0], nbv[1]
-        nbv_path = DataLoadUtil.get_path(self.root_dir, scene_idx, nbv_idx)
+        nbv_path = DataLoadUtil.get_path(self.root_dir, scene_name, nbv_idx)
         nbv_pts = DataLoadUtil.load_depth(nbv_path)
         cam_info = DataLoadUtil.load_cam_info(nbv_path)
         nbv_cam_pose = cam_info["cam_to_world"]
+
+        nbv_cam_pose_6d = PoseUtil.matrix_to_rotation_6d_numpy(np.asarray(nbv_cam_pose[:3,:3]))
+        translation = nbv_cam_pose[:3,3]
+        nbv_cam_pose_9d = np.concatenate([nbv_cam_pose_6d, translation], axis=0)
         data_item = {
             "scanned_views_pts": np.asarray(scanned_views_pts,dtype=np.float32),
             "scanned_coverages_rate": np.asarray(scanned_coverages_rate,dtype=np.float32),
             "scanned_cam_pose": np.asarray(scanned_cam_pose,dtype=np.float32),
             "nbv_pts": np.asarray(nbv_pts,dtype=np.float32),
             "nbv_coverage_rate": nbv_coverage_rate,
-            "nbv_cam_pose": nbv_cam_pose,
+            "nbv_cam_pose": nbv_cam_pose_9d,
             "max_coverage_rate": max_coverage_rate,
             "scene_name": scene_name
         }
 
         return data_item
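For reference, the 9D pose the dataset now stores is the continuous 6D rotation representation concatenated with the translation vector. A minimal sketch of that encoding and its inverse, assuming PoseUtil.matrix_to_rotation_6d_numpy flattens the first two rows of the rotation matrix (the actual convention inside PoseUtil may differ, e.g. columns instead of rows):

import numpy as np

def matrix_to_rotation_6d(rot):
    # Flatten the first two rows of a 3x3 rotation matrix into 6 numbers.
    return rot[:2, :].reshape(-1)

def rotation_6d_to_matrix(d6):
    # Gram-Schmidt: re-orthonormalize the two rows, third row is their cross product.
    a1, a2 = d6[:3], d6[3:]
    b1 = a1 / np.linalg.norm(a1)
    b2 = a2 - np.dot(b1, a2) * b1
    b2 = b2 / np.linalg.norm(b2)
    b3 = np.cross(b1, b2)
    return np.stack([b1, b2, b3], axis=0)

cam_to_world = np.eye(4)  # stand-in for DataLoadUtil.load_cam_info(view_path)["cam_to_world"]
rot_6d = matrix_to_rotation_6d(cam_to_world[:3, :3])
pose_9d = np.concatenate([rot_6d, cam_to_world[:3, 3]], axis=0)  # shape (9,)

Storing poses this way keeps the rotation in a continuous, regression-friendly form while the translation stays metric.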
@@ -1,23 +1,37 @@
 import torch
 from utils.pose import PoseUtil
 import PytorchBoot.stereotype as stereotype
 import PytorchBoot.namespace as namespace
 
-@stereotype.evaluation_method("delta_pose_diff")
-class DeltaPoseDiff:
+def get_view_data(cam_pose, scene_name):
+    pass
+
+@stereotype.evaluation_method("pose_diff", comment="not tested")
+class PoseDiff:
     def __init__(self, _):
         pass
 
     def evaluate(self, output_list, data_list):
         results = {namespace.TensorBoard.SCALAR: {}}
         rot_angle_list = []
+        trans_dist_list = []
         for output, data in zip(output_list, data_list):
-            gt_delta_rot_6d = data['delta_rot_6d']
-            est_delta_rot_6d = output['estimated_delta_rot_6d']
-            gt_delta_rot_mat = PoseUtil.rotation_6d_to_matrix_tensor_batch(gt_delta_rot_6d)
-            est_delta_rot_mat = PoseUtil.rotation_6d_to_matrix_tensor_batch(est_delta_rot_6d)
-            rotation_angles = PoseUtil.rotation_angle_distance(gt_delta_rot_mat, est_delta_rot_mat)
+            gt_pose_9d = data['nbv_cam_pose']
+            pred_pose_9d = output['pred_pose_9d']
+            gt_rot_6d = gt_pose_9d[:, :6]
+            gt_trans = gt_pose_9d[:, 6:]
+            pred_rot_6d = pred_pose_9d[:, :6]
+            pred_trans = pred_pose_9d[:, 6:]
+            gt_rot_mat = PoseUtil.rotation_6d_to_matrix_tensor_batch(gt_rot_6d)
+            pred_rot_mat = PoseUtil.rotation_6d_to_matrix_tensor_batch(pred_rot_6d)
+            rotation_angles = PoseUtil.rotation_angle_distance(gt_rot_mat, pred_rot_mat)
             rot_angle_list.extend(list(rotation_angles))
+            trans_dist = torch.norm(gt_trans-pred_trans)
+            trans_dist_list.append(trans_dist)
 
 
-        results[namespace.TensorBoard.SCALAR]["delta_rotation"] = float(sum(rot_angle_list) / len(rot_angle_list))
+        results[namespace.TensorBoard.SCALAR]["rot_diff"] = float(sum(rot_angle_list) / len(rot_angle_list))
+        results[namespace.TensorBoard.SCALAR]["trans_diff"] = float(sum(trans_dist_list) / len(trans_dist_list))
         return results
 
 
@@ -25,8 +39,40 @@ class DeltaPoseDiff:
 @stereotype.evaluation_method("coverage_rate_increase",comment="unfinished")
 class ConverageRateIncrease:
     def __init__(self, config):
-        pass
+        self.config = config
 
 
     def evaluate(self, output_list, data_list):
+        return
+        results = {namespace.TensorBoard.SCALAR: {}}
+        gt_coverate_increase_list = []
+        pred_coverate_increase_list = []
+        cr_diff_list = []
+        for output, data in zip(output_list, data_list):
+            scanned_cr = data['scanned_coverages_rate']
+            gt_cr = data["nbv_coverage_rate"]
+            scene_name_list = data['scene_name']
+            scanned_view_pts_list = data['scanned_views_pts']
+            pred_pose_9ds = output['pred_pose_9d']
+            pred_rot_mats = PoseUtil.rotation_6d_to_matrix_tensor_batch(pred_pose_9ds[:, :6])
+            pred_pose_mats = torch.cat([pred_rot_mats, pred_pose_9ds[:, 6:]], dim=-1)
+
+            for idx in range(len(scanned_cr)):
+                gt_coverate_increase_list.append(gt_cr-scanned_cr[idx])
+                scene_name = scene_name_list[idx]
+                pred_pose = pred_pose_mats[idx]
+                scanned_view_pts = scanned_view_pts_list[idx]
+                view_data = get_view_data(pred_pose, scene_name)
+                pred_cr = self.compute_coverage_rate(pred_pose, scanned_view_pts, view_data)
+                pred_coverate_increase_list.append(pred_cr-scanned_cr[idx])
+                cr_diff_list.append(gt_cr-pred_cr)
+
+        results[namespace.TensorBoard.SCALAR]["gt_cr_increase"] = float(sum(gt_coverate_increase_list) / len(gt_coverate_increase_list))
+        results[namespace.TensorBoard.SCALAR]["pred_cr_increase"] = float(sum(pred_coverate_increase_list) / len(pred_coverate_increase_list))
+        results[namespace.TensorBoard.SCALAR]["cr_diff"] = float(sum(cr_diff_list) / len(cr_diff_list))
+        return results
+
+    def compute_coverage_rate(self, pred_pose, scanned_view_pts, view_data):
+        pass
 
 
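The pose_diff metric above splits each 9D pose into its 6D rotation and translation parts, converts the rotations back to matrices, and averages a rotation-angle distance and a translation distance. A sketch of the geodesic angle that PoseUtil.rotation_angle_distance presumably computes (the real implementation may return radians rather than degrees):

import torch

def rotation_angle_distance(R_gt, R_pred):
    # Geodesic angle between batches of rotation matrices:
    # trace(R_gt^T @ R_pred) = 1 + 2*cos(theta).
    R_rel = torch.matmul(R_gt.transpose(1, 2), R_pred)
    trace = R_rel.diagonal(dim1=1, dim2=2).sum(-1)
    cos_theta = ((trace - 1.0) / 2.0).clamp(-1.0, 1.0)
    return torch.rad2deg(torch.acos(cos_theta))  # (B,) angles in degrees

Note that torch.norm(gt_trans - pred_trans) in the diff reduces over the whole batch to a single scalar; a per-sample distance would pass dim=-1 before averaging.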
@@ -38,7 +38,7 @@ class NBVReconstructionPipeline(nn.Module):
     def forward_train(self, data):
         pts_list = data['pts_list']
         pose_list = data['pose_list']
-        gt_delta_rot_6d = data["delta_rot_6d"]
+        gt_rot_6d = data["nbv_cam_pose"]
         pts_feat_list = []
         pose_feat_list = []
         for pts,pose in zip(pts_list,pose_list):
@@ -46,7 +46,7 @@ class NBVReconstructionPipeline(nn.Module):
             pose_feat_list.append(self.pose_encoder.encode_pose(pose))
         seq_feat = self.seq_encoder.encode_sequence(pts_feat_list, pose_feat_list)
         ''' get std '''
-        perturbed_x, random_t, target_score, std = self.pertube_data(gt_delta_rot_6d)
+        perturbed_x, random_t, target_score, std = self.pertube_data(gt_rot_6d)
         input_data = {
             "sampled_pose": perturbed_x,
             "t": random_t,
@@ -69,9 +69,9 @@ class NBVReconstructionPipeline(nn.Module):
             pts_feat_list.append(self.pts_encoder.encode_points(pts))
             pose_feat_list.append(self.pose_encoder.encode_pose(pose))
         seq_feat = self.seq_encoder.encode_sequence(pts_feat_list, pose_feat_list)
-        estimated_delta_rot_6d, in_process_sample = self.view_finder.next_best_view(seq_feat)
+        estimated_delta_rot_9d, in_process_sample = self.view_finder.next_best_view(seq_feat)
         result = {
-            "estimated_delta_rot_6d": estimated_delta_rot_6d,
+            "pred_pose_9d": estimated_delta_rot_9d,
             "in_process_sample": in_process_sample
         }
         return result
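Since forward_test now returns the sampled pose as pred_pose_9d, downstream consumers (e.g. the coverage-rate evaluator) have to rebuild a camera-to-world matrix from it. A sketch of one way to do that, with hypothetical helper names; note that concatenating the (B, 3) translation onto (B, 3, 3) rotation matrices needs an extra dimension first, which the unfinished evaluator above does not yet add:

import torch

def rotation_6d_to_matrix_batch(d6):
    # Batched Gram-Schmidt, mirroring what PoseUtil.rotation_6d_to_matrix_tensor_batch
    # presumably does (row convention assumed).
    a1, a2 = d6[..., :3], d6[..., 3:]
    b1 = torch.nn.functional.normalize(a1, dim=-1)
    b2 = torch.nn.functional.normalize(a2 - (b1 * a2).sum(-1, keepdim=True) * b1, dim=-1)
    b3 = torch.cross(b1, b2, dim=-1)
    return torch.stack([b1, b2, b3], dim=-2)           # (B, 3, 3)

def pose_9d_to_matrix(pose_9d):
    rot = rotation_6d_to_matrix_batch(pose_9d[:, :6])   # (B, 3, 3)
    trans = pose_9d[:, 6:].unsqueeze(-1)                # (B, 3, 1); extra dim before cat
    pose_3x4 = torch.cat([rot, trans], dim=-1)          # (B, 3, 4)
    bottom = torch.tensor([0.0, 0.0, 0.0, 1.0]).expand(pose_3x4.size(0), 1, 4)
    return torch.cat([pose_3x4, bottom], dim=-2)        # (B, 4, 4) camera-to-world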