change DataLoadUtil and Dataset to blender version
@@ -1,70 +1,36 @@
 import os
-import OpenEXR
-import Imath
 import numpy as np
 import json
 import cv2
-import re
+import trimesh
 
 class DataLoadUtil:
 
     @staticmethod
-    def get_path(root, scene_idx, frame_idx):
-        path = os.path.join(root, f"sequence.{scene_idx}", f"step{frame_idx}")
+    def get_path(root, scene_name, frame_idx):
+        path = os.path.join(root, scene_name, f"{frame_idx}")
         return path
 
     @staticmethod
-    def get_label_path(root, scene_idx):
-        path = os.path.join(root, f"sequence.{scene_idx}_label.json")
+    def get_label_path(root, scene_name):
+        path = os.path.join(root,scene_name, f"label.json")
         return path
 
     @staticmethod
-    def get_scene_idx_list(root):
-        scene_dir = os.listdir(root)
-        scene_idx_list = []
-        for scene in scene_dir:
-            if "sequence" in scene:
-                scene_idx = int(re.search(r'\d+', scene).group())
-                scene_idx_list.append(scene_idx)
-        return scene_idx_list
-
-    @staticmethod
-    def get_frame_idx_list(root, scene_idx):
-        scene_path = os.path.join(root, f"sequence.{scene_idx}")
-        view_dir = os.listdir(scene_path)
-        seen_frame_idx = set()
-        for view in view_dir:
-            if "step" in view:
-                frame_idx = int(re.search(r'\d+', view).group())
-                seen_frame_idx.add(frame_idx)
-        return list(seen_frame_idx)
-
-    @staticmethod
-    def load_model_points(root,scene_idx):
-        model_path = os.path.join(root, f"sequence.{scene_idx}", "world_points.txt")
-        model_pts = np.loadtxt(model_path)
-        return model_pts
-
-    @staticmethod
-    def read_exr_depth(depth_path):
-        file = OpenEXR.InputFile(depth_path)
-
-        dw = file.header()['dataWindow']
-        width = dw.max.x - dw.min.x + 1
-        height = dw.max.y - dw.min.y + 1
-
-        pix_type = Imath.PixelType(Imath.PixelType.FLOAT)
-        depth_map = np.frombuffer(file.channel('R', pix_type), dtype=np.float32)
-
-        depth_map.shape = (height, width)
-
-        return depth_map
+    def load_model_points(root, scene_name):
+        model_path = os.path.join(root, scene_name, "sampled_model_points.txt")
+        mesh = trimesh.load(model_path)
+        return mesh.vertices
 
     @staticmethod
     def load_depth(path):
-        depth_path = path + ".camera.Depth.exr"
-        depth_map = DataLoadUtil.read_exr_depth(depth_path)
-        return depth_map
+        depth_path = os.path.join(os.path.dirname(path), "depth", os.path.basename(path) + ".png")
+        depth = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED)
+        depth = depth.astype(np.float32) / 65535.0
+        min_depth = 0.01
+        max_depth = 5.0
+        depth_meters = min_depth + (max_depth - min_depth) * depth
+        return depth_meters
 
     @staticmethod
     def load_label(path):
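The new load_depth assumes depth is exported as a single-channel 16-bit PNG that linearly encodes metric depth over a fixed [0.01 m, 5.0 m] range. A minimal sketch of the matching encoder (the encode_depth helper below is illustrative, not part of this commit, and assumes the same range constants):

    import numpy as np
    import cv2

    def encode_depth(depth_meters, png_path, min_depth=0.01, max_depth=5.0):
        # Inverse of the decoding in load_depth: clip to the assumed range,
        # normalize to [0, 1], and store as a 16-bit PNG.
        normalized = (np.clip(depth_meters, min_depth, max_depth) - min_depth) / (max_depth - min_depth)
        cv2.imwrite(png_path, (normalized * 65535.0).astype(np.uint16))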
@@ -74,49 +40,41 @@ class DataLoadUtil:
 
     @staticmethod
     def load_rgb(path):
-        rgb_path = path + ".camera.png"
+        rgb_path = os.path.join(os.path.dirname(path), "rgb", os.path.basename(path) + ".png")
         rgb_image = cv2.imread(rgb_path, cv2.IMREAD_COLOR)
         return rgb_image
 
     @staticmethod
     def load_seg(path):
-        seg_path = path + ".camera.semantic segmentation.png"
-        seg_image = cv2.imread(seg_path, cv2.IMREAD_COLOR)
-        return seg_image
+        mask_path = os.path.join(os.path.dirname(path), "mask", os.path.basename(path) + ".png")
+        mask_image = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
+        return mask_image
 
     @staticmethod
-    def load_cam_info(path):
-        label_path = path + ".camera_params.json"
-        with open(label_path, 'r') as f:
-            label_data = json.load(f)
-        cam_transform = np.asarray(label_data['cam_to_world']).reshape(
-            (4, 4)
-        ).T
-
+    def cam_pose_transformation(cam_pose_before):
         offset = np.asarray([
             [1, 0, 0, 0],
             [0, -1, 0, 0],
-            [0, 0, 1, 0],
+            [0, 0, -1, 0],
             [0, 0, 0, 1]])
-
-        cam_to_world = cam_transform @ offset
-
-
-
-        f_x = label_data['f_x']
-        f_y = label_data['f_y']
-        c_x = label_data['c_x']
-        c_y = label_data['c_y']
-        cam_intrinsic = np.array([[f_x, 0, c_x], [0, f_y, c_y], [0, 0, 1]])
-
+        cam_pose_after = cam_pose_before @ offset
+        return cam_pose_after
+
+    @staticmethod
+    def load_cam_info(path):
+        camera_params_path = os.path.join(os.path.dirname(path), "camera_params", os.path.basename(path) + ".json")
+        with open(camera_params_path, 'r') as f:
+            label_data = json.load(f)
+        cam_to_world = np.asarray(label_data["extrinsic"])
+        cam_to_world = DataLoadUtil.cam_pose_transformation(cam_to_world)
+        cam_intrinsic = np.asarray(label_data["intrinsic"])
         return {
             "cam_to_world": cam_to_world,
             "cam_intrinsic": cam_intrinsic
         }
 
-
     @staticmethod
-    def get_target_point_cloud(depth, cam_intrinsic, cam_extrinsic, mask, target_mask_label=(255,255,255)):
+    def get_target_point_cloud(depth, cam_intrinsic, cam_extrinsic, mask, target_mask_label=255):
         h, w = depth.shape
         i, j = np.meshgrid(np.arange(w), np.arange(h), indexing='xy')
 
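The offset applied in cam_pose_transformation is a diag(1, -1, -1, 1) flip, commonly used to convert a Blender-style camera pose (X right, Y up, looking down -Z) into the OpenCV-style convention (X right, Y down, Z forward) that the pinhole intrinsics in get_target_point_cloud assume. As a sanity check, here is a hedged sketch of the reverse direction, projecting a world point into the image with the dict returned by the new load_cam_info (project_world_point is a hypothetical helper, not part of this commit):

    import numpy as np

    def project_world_point(point_world, cam_to_world, cam_intrinsic):
        # Transform the point into camera coordinates, then apply the pinhole
        # model (u = fx * x / z + cx, v = fy * y / z + cy), mirroring the
        # back-projection used in get_target_point_cloud.
        world_to_cam = np.linalg.inv(cam_to_world)
        x, y, z = (world_to_cam @ np.append(point_world, 1.0))[:3]
        u = cam_intrinsic[0, 0] * x / z + cam_intrinsic[0, 2]
        v = cam_intrinsic[1, 1] * y / z + cam_intrinsic[1, 2]
        return u, v

    # cam_info = DataLoadUtil.load_cam_info(path)
    # u, v = project_world_point(np.array([0.0, 0.0, 1.0]),
    #                            cam_info["cam_to_world"], cam_info["cam_intrinsic"])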
@@ -125,34 +83,16 @@ class DataLoadUtil:
         y = (j - cam_intrinsic[1, 2]) * z / cam_intrinsic[1, 1]
 
         points_camera = np.stack((x, y, z), axis=-1).reshape(-1, 3)
-        points_camera_aug = np.concatenate([points_camera, np.ones((points_camera.shape[0], 1))], axis=-1)
-
-        points_world = np.dot(cam_extrinsic, points_camera_aug.T).T[:, :3]
-        mask = mask.reshape(-1, 3)
-        target_mask = np.all(mask == target_mask_label, axis=-1)
-        return {
-            "points_world": points_world[target_mask],
-            "points_camera": points_camera[target_mask]
-        }
-
-    @staticmethod
-    def get_target_point_cloud(depth, cam_intrinsic, cam_extrinsic, mask, target_mask_label=(255,255,255)):
-        h, w = depth.shape
-        i, j = np.meshgrid(np.arange(w), np.arange(h), indexing='xy')
-
-        z = depth
-        x = (i - cam_intrinsic[0, 2]) * z / cam_intrinsic[0, 0]
-        y = (j - cam_intrinsic[1, 2]) * z / cam_intrinsic[1, 1]
-
-        points_camera = np.stack((x, y, z), axis=-1).reshape(-1, 3)
-        points_camera_aug = np.concatenate([points_camera, np.ones((points_camera.shape[0], 1))], axis=-1)
-
-        points_world = np.dot(cam_extrinsic, points_camera_aug.T).T[:, :3]
-        mask = mask.reshape(-1, 3)
-        target_mask = np.all(mask == target_mask_label, axis=-1)
+        target_mask = np.all(mask == target_mask_label)
+        target_points_camera = points_camera[target_mask]
+        target_points_camera_aug = np.concatenate([target_points_camera, np.ones((target_points_camera.shape[0], 1))], axis=-1)
+
+        target_points_world = np.dot(cam_extrinsic, target_points_camera_aug.T).T[:, :3]
+
         return {
-            "points_world": points_world[target_mask],
-            "points_camera": points_camera[target_mask]
+            "points_world": target_points_world,
+            "points_camera": target_points_camera
         }
 
     @staticmethod
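Taken together, the Blender version expects a per-scene layout of <root>/<scene_name>/{rgb,depth,mask,camera_params}/<frame_idx>.{png,json}. A hedged end-to-end usage sketch (root, scene_name and frame_idx below are placeholders, not paths from this repository):

    import numpy as np

    root = "/path/to/blender_dataset"   # placeholder
    scene_name = "scene_0"              # placeholder
    frame_idx = 0                       # placeholder

    path = DataLoadUtil.get_path(root, scene_name, frame_idx)
    depth = DataLoadUtil.load_depth(path)        # (H, W) float32, meters
    mask = DataLoadUtil.load_seg(path)           # (H, W) uint8, target label assumed to be 255
    cam_info = DataLoadUtil.load_cam_info(path)

    pts = DataLoadUtil.get_target_point_cloud(
        depth, cam_info["cam_intrinsic"], cam_info["cam_to_world"], mask
    )
    # pts["points_camera"] / pts["points_world"]: target points in the camera / world frame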