successfully added ap policy class
parent 7812730925
commit 5deca92c83
@@ -1,6 +1,7 @@
 # Updated installation steps for my PC environment
 
 ```sh
+# Install Active Grasp
 sudo apt install liborocos-kdl-dev
 mkdir -p ws/src && cd ws/src
 git clone https://github.com/0nhc/active_grasp.git
@@ -18,6 +19,10 @@ git clone https://github.com/0nhc/robot_helpers.git
 cd ..
 rosdep install --from-paths src --ignore-src -r -y
 catkin build
 
+
+# Install Active Perception
+cd <path-to-your-ws>/src/active_grasp/src/active_grasp/active_perception/modules/module_lib/pointnet2_utils/pointnet2
+pip install -e .
 ```
 
 # Closed-Loop Next-Best-View Planning for Target-Driven Grasping
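A quick sanity check after the `pip install -e .` step: the pointnet2 package is typically a compiled CUDA extension for PyTorch, so it only loads if the interpreter sees the same CUDA-enabled torch it was built against. A minimal check (assumes PyTorch is already installed; nothing here is specific to this repo):

```python
# Post-install sanity check: pointnet2's compiled kernels need a
# CUDA-enabled PyTorch at import time.
import torch

print(torch.__version__)          # the extension is built against this exact version
print(torch.cuda.is_available())  # should be True before running inference
```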
@@ -34,3 +34,7 @@ nbv_grasp:
   max_views: 80
   min_gain: 10
   downsample: 10 # 10/20 for sim/hw respectively
+
+ap_grasp:
+  max_views: 80
+  ap_config_path: $(find active_grasp)/src/active_grasp/active_perception/configs/local_inference_config.yaml
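One caveat on the new ap_grasp block: the $(find active_grasp) substitution is resolved by roslaunch, so this file has to be loaded through a launch file (not rosparam load directly) for ap_config_path to expand. Once it is on the parameter server, reading it back looks roughly like this sketch (parameter names taken from the diff; the surrounding node setup is assumed):

```python
import rospy

# Inside a node started by the launch file that loaded the YAML above;
# the keys correspond to the ap_grasp block added in this commit.
rospy.init_node("ap_grasp_demo")
max_views = rospy.get_param("ap_grasp/max_views")            # 80
ap_config_path = rospy.get_param("ap_grasp/ap_config_path")  # expanded absolute path
```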
@@ -1,9 +1,11 @@
 from .policy import register
 from .baselines import *
 from .nbv import NextBestView
+from .active_perception_policy import ActivePerceptionPolicy
 
 register("initial-view", InitialView)
 register("top-view", TopView)
 register("top-trajectory", TopTrajectory)
 register("fixed-trajectory", FixedTrajectory)
 register("nbv", NextBestView)
+register("ap", ActivePerceptionPolicy)
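For context, register presumably keeps a module-level mapping from string id to policy class so the controller can instantiate a policy by name (e.g. "ap" from a launch argument). A hypothetical sketch of that pattern, not the actual code in .policy:

```python
# Hypothetical registry sketch; the real register() lives in
# active_grasp/policy.py and may differ in detail.
registry = {}

def register(id, cls):
    # Associate a string id (e.g. "ap") with a policy class.
    registry[id] = cls

def make(id, *args, **kwargs):
    # Look up and instantiate the policy selected at runtime.
    return registry[id](*args, **kwargs)
```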
@@ -13,10 +13,10 @@ for i in range(2):
     PROJECT_ROOT = path
     sys.path.append(PROJECT_ROOT)
 
-from active_perception.configs.config import ConfigManager
-from active_perception.modules.pipeline import Pipeline
+from .active_perception.configs.config import ConfigManager
+from .active_perception.modules.pipeline import Pipeline
 
-class InferenceEngine():
+class APInferenceEngine():
     RESULTS_DIR_NAME: str = 'results'
     LOG_DIR_NAME: str = 'log'
 
@@ -58,7 +58,6 @@ class InferenceEngine():
         self.pipeline.eval()
         with torch.no_grad():
             output = self.pipeline(data, Pipeline.TEST_MODE)
 
         return output
 
-
@@ -84,7 +83,7 @@ if __name__ == "__main__":
     }
 
     ''' Inference '''
-    inference_engine = InferenceEngine(args.config)
+    inference_engine = APInferenceEngine(args.config)
     output = inference_engine.inference(test_data)
     print(output.keys())
    print(output['estimated_delta_rot_6d'])
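The key name estimated_delta_rot_6d suggests the pipeline predicts rotations in the continuous 6D representation (two 3-vectors that are orthonormalized into the first two columns of a rotation matrix). If so, mapping the output back to a matrix looks roughly like this sketch (assumes a flat 6-vector; the actual output shape is not shown in this commit):

```python
import numpy as np

def rot6d_to_matrix(d6):
    # Gram-Schmidt the two 3-vectors of the 6D representation into an
    # orthonormal basis; columns b1, b2, b3 form the rotation matrix.
    a1, a2 = d6[:3], d6[3:]
    b1 = a1 / np.linalg.norm(a1)
    b2 = a2 - np.dot(b1, a2) * b1
    b2 = b2 / np.linalg.norm(b2)
    b3 = np.cross(b1, b2)
    return np.stack([b1, b2, b3], axis=-1)
```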
src/active_grasp/active_perception_policy.py (new file, 55 lines)
@@ -0,0 +1,55 @@
+import itertools
+from numba import jit
+import numpy as np
+import rospy
+from .policy import MultiViewPolicy
+from .timer import Timer
+
+from .active_perception_demo import APInferenceEngine
+
+
+class ActivePerceptionPolicy(MultiViewPolicy):
+    def __init__(self):
+        super().__init__()
+        self.max_views = rospy.get_param("ap_grasp/max_views")
+        self.ap_config_path = rospy.get_param("ap_grasp/ap_config_path")
+        self.ap_inference_engine = APInferenceEngine(self.ap_config_path)
+
+    def activate(self, bbox, view_sphere):
+        super().activate(bbox, view_sphere)
+
+    def update(self, img, x, q):
+        self.depth_image_to_ap_input(img)
+        # if len(self.views) > self.max_views or self.best_grasp_prediction_is_stable():
+        #     self.done = True
+        # else:
+        #     with Timer("state_update"):
+        #         self.integrate(img, x, q)
+        #     with Timer("view_generation"):
+        #         views = self.generate_views(q)
+        #     with Timer("ig_computation"):
+        #         gains = [self.ig_fn(v, self.downsample) for v in views]
+        #     with Timer("cost_computation"):
+        #         costs = [self.cost_fn(v) for v in views]
+        #     utilities = gains / np.sum(gains) - costs / np.sum(costs)
+        #     self.vis.ig_views(self.base_frame, self.intrinsic, views, utilities)
+        #     i = np.argmax(utilities)
+        #     nbv, gain = views[i], gains[i]
+
+        #     if gain < self.min_gain and len(self.views) > self.T:
+        #         self.done = True
+
+        #     self.x_d = nbv
+
+    def depth_image_to_ap_input(self, depth_img):
+        print(self.intrinsic.K)
+
+    def best_grasp_prediction_is_stable(self):
+        if self.best_grasp:
+            t = (self.T_task_base * self.best_grasp.pose).translation
+            i, j, k = (t / self.tsdf.voxel_size).astype(int)
+            qs = self.qual_hist[:, i, j, k]
+            if np.count_nonzero(qs) == self.T and np.mean(qs) > 0.9:
+                return True
+        return False
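Note that depth_image_to_ap_input is still a stub: it only prints the camera matrix self.intrinsic.K. A plausible completion would back-project the depth map through the pinhole intrinsics into a camera-frame point cloud, along these lines (a sketch; the tensor format the inference engine actually expects is not part of this commit):

```python
import numpy as np

def depth_to_points(depth, K):
    # Back-project an (H, W) metric depth image through pinhole
    # intrinsics K into an (N, 3) camera-frame point cloud.
    fx, fy = K[0, 0], K[1, 1]
    cx, cy = K[0, 2], K[1, 2]
    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    x = (u - cx) * depth / fx
    y = (v - cy) * depth / fy
    pts = np.stack([x, y, depth], axis=-1).reshape(-1, 3)
    return pts[pts[:, 2] > 0]  # drop pixels with no depth reading
```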