diff --git a/src/active_grasp/active_perception_policy.py b/src/active_grasp/active_perception_policy.py
index 542f130..e04f8ec 100644
--- a/src/active_grasp/active_perception_policy.py
+++ b/src/active_grasp/active_perception_policy.py
@@ -60,9 +60,6 @@ class ActivePerceptionMultiViewPolicy(MultiViewPolicy):
         self.pcdvis = RealTime3DVisualizer()
 
-    def activate(self, bbox, view_sphere):
-        super().activate(bbox, view_sphere)
-
     def update(self, img, seg, target_id, x, q):
         if len(self.views) > self.max_views or self.best_grasp_prediction_is_stable():
             self.done = True
diff --git a/src/active_grasp/controller.py b/src/active_grasp/controller.py
index 263178d..e10a256 100644
--- a/src/active_grasp/controller.py
+++ b/src/active_grasp/controller.py
@@ -108,23 +108,33 @@ class GraspController:
         self.policy.activate(bbox, self.view_sphere)
         timer = rospy.Timer(rospy.Duration(1.0 / self.control_rate), self.send_vel_cmd)
         r = rospy.Rate(self.policy_rate)
-        while not self.policy.done:
-            depth_img, seg_image, pose, q = self.get_state()
-            target_seg_id = self.get_target_id(TargetIDRequest()).id
-            self.policy.update(depth_img, seg_image, target_seg_id, pose, q)
-            # Wait for the robot to move to its desired camera pose
-            moving_to_The_target = True
-            while(moving_to_The_target):
+
+        if(self.policy.policy_type=="single_view"):
+            while not self.policy.done:
                 depth_img, seg_image, pose, q = self.get_state()
-                current_p = pose.as_matrix()[:3,3]
-                target_p = self.policy.x_d.as_matrix()[:3,3]
-                linear_d = np.sqrt((current_p[0]-target_p[0])**2+
-                                   (current_p[1]-target_p[1])**2+
-                                   (current_p[2]-target_p[2])**2)
-                if(linear_d < self.move_to_target_threshold):
-                    # Arrived
-                    moving_to_The_target = False
+                target_seg_id = self.get_target_id(TargetIDRequest()).id
+                self.policy.update(depth_img, seg_image, target_seg_id, pose, q)
+                # Wait for the robot to move to its desired camera pose
+                moving_to_The_target = True
+                while(moving_to_The_target):
+                    depth_img, seg_image, pose, q = self.get_state()
+                    current_p = pose.as_matrix()[:3,3]
+                    target_p = self.policy.x_d.as_matrix()[:3,3]
+                    linear_d = np.sqrt((current_p[0]-target_p[0])**2+
+                                       (current_p[1]-target_p[1])**2+
+                                       (current_p[2]-target_p[2])**2)
+                    if(linear_d < self.move_to_target_threshold):
+                        # Arrived
+                        moving_to_The_target = False
+                r.sleep()
+        elif(self.policy.policy_type=="multi_view"):
+            while not self.policy.done:
+                depth_img, seg_image, pose, q = self.get_state()
+                target_seg_id = self.get_target_id(TargetIDRequest()).id
+                self.policy.update(depth_img, seg_image, target_seg_id, pose, q)
                 r.sleep()
+        else:
+            print("Unsupported policy type: "+str(self.policy.policy_type))
 
         # Wait for a zero command to be sent to the robot.
         rospy.sleep(0.2)
diff --git a/src/active_grasp/policy.py b/src/active_grasp/policy.py
index aba967c..c1149b2 100644
--- a/src/active_grasp/policy.py
+++ b/src/active_grasp/policy.py
@@ -67,6 +67,8 @@ class Policy:
         self.done = False
         self.info = {}
 
+        self.policy_type = "policy"
+
     def calibrate_task_frame(self):
         xyz = np.r_[self.bbox.center[:2] - 0.15, self.bbox.min[2] - 0.05]
         self.T_base_task = Transform.from_translation(xyz)
@@ -106,6 +108,10 @@ def select_best_grasp(grasps, qualities):
 
 
 class SingleViewPolicy(Policy):
+    def activate(self, bbox, view_sphere):
+        super().activate(bbox, view_sphere)
+        self.policy_type = "single_view"
+
     def update(self, img, seg, target_id, x, q):
         linear, _ = compute_error(self.x_d, x)
         if np.linalg.norm(linear) < 0.02:
@@ -143,6 +149,7 @@ class MultiViewPolicy(Policy):
     def activate(self, bbox, view_sphere):
         super().activate(bbox, view_sphere)
         self.qual_hist = np.zeros((self.T,) + (40,) * 3, np.float32)
+        self.policy_type = "multi_view"
 
     def integrate(self, img, x, q):
         self.views.append(x)
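Note on the dispatch introduced above: each policy subclass now stamps a `policy_type` string in `activate`, so `GraspController` can give `SingleViewPolicy` a blocking wait-until-arrived loop while `MultiViewPolicy` simply updates once per control tick. The following is a minimal, self-contained sketch of that pattern; the stub classes, the `step` helper, and the `threshold` value are hypothetical illustrations, not the repo's API. It also shows that the diff's hand-rolled sqrt-of-squared-differences is equivalent to `np.linalg.norm(current_p - target_p)`.

    import numpy as np

    class StubSingleViewPolicy:
        # Hypothetical stand-in for SingleViewPolicy; only the policy_type
        # attribute mirrors the diff above.
        policy_type = "single_view"
        x_d = np.array([0.4, 0.0, 0.5])  # desired camera position

    class StubMultiViewPolicy:
        # Hypothetical stand-in for MultiViewPolicy.
        policy_type = "multi_view"

    def step(policy, current_p, threshold=0.01):
        # One controller tick, mirroring the branch structure in controller.py.
        if policy.policy_type == "single_view":
            # Same quantity as the diff's element-wise sqrt of squared deltas.
            linear_d = np.linalg.norm(current_p - policy.x_d)
            return "arrived" if linear_d < threshold else "moving"
        elif policy.policy_type == "multi_view":
            return "update"
        else:
            raise ValueError("Unsupported policy type: " + str(policy.policy_type))

    print(step(StubSingleViewPolicy(), np.array([0.4, 0.0, 0.5])))  # arrived
    print(step(StubMultiViewPolicy(), np.zeros(3)))                 # update

Dispatching on a string attribute keeps controller.py decoupled from the concrete policy classes; the trade-off is that a misspelled `policy_type` falls through to the unsupported-type branch at runtime, which the diff reports only with a `print`.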