This commit is contained in:
2024-10-09 16:13:22 +00:00
commit 0ea3f048dc
437 changed files with 44406 additions and 0 deletions

View File

@@ -0,0 +1,13 @@
*.pyc
*.so
*.o
*.egg-info/
/graspnetAPI/dump_full/
/graspnetAPI/eval/acc_novel
/dump_full/
/dist/
/build/
/.vscode/
/graspnms/build/
*.npy
/graspnms/grasp_nms.cpp

View File

@@ -0,0 +1,30 @@
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
# Required
version: 2
# Build documentation in the docs/ directory with Sphinx
sphinx:
  configuration: docs/source/conf.py
# Build documentation with MkDocs
#mkdocs:
#  configuration: mkdocs.yml
build:
  image: stable
# Optionally build your docs in additional formats such as PDF
formats:
  - pdf
  - epub
# Optionally set the version of Python and requirements required to build your docs
python:
  version: 3.6
  install:
    - requirements: docs/requirements.txt
    - method: pip
      path: .
  system_packages: true

View File

@@ -0,0 +1,95 @@
# graspnetAPI
[![PyPI version](https://badge.fury.io/py/graspnetAPI.svg)](https://badge.fury.io/py/graspnetAPI)
## Dataset
Visit the [GraspNet Website](http://graspnet.net) to get the dataset.
## Install
You can install using pip.
```bash
pip install graspnetAPI
```
You can also install from source.
```bash
git clone https://github.com/graspnet/graspnetAPI.git
cd graspnetAPI
pip install .
```
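A quick way to verify the installation is a minimal sketch that only imports the package and prints its version:
```python
# Minimal post-install check: import the package and print its version.
import graspnetAPI
print(graspnetAPI.__version__)
```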
## Document
Refer to [online document](https://graspnetapi.readthedocs.io/en/latest/index.html) for more details.
[PDF Document](https://graspnetapi.readthedocs.io/_/downloads/en/latest/pdf/) is available, too.
You can also build the doc manually.
```bash
cd docs
pip install -r requirements.txt
bash build_doc.sh
```
LaTeX is required to build the PDF; the HTML documentation can be built without it.
## Grasp Definition
The frame of our gripper is defined as
<div align="center">
<img src="grasp_definition.png" width="400">
</div>
## Examples
```bash
cd examples
# change the graspnet root path in each script before running
# How to load labels from graspnet.
python3 exam_loadGrasp.py
# How to convert between 6d and rectangle grasps.
python3 exam_convert.py
# Check the completeness of the data.
python3 exam_check_data.py
# you can also run other examples
```
Please refer to our document for more examples.
## Citation
Please cite these papers in your publications if they help your research:
```
@article{fang2023robust,
title={Robust grasping across diverse sensor qualities: The GraspNet-1Billion dataset},
author={Fang, Hao-Shu and Gou, Minghao and Wang, Chenxi and Lu, Cewu},
journal={The International Journal of Robotics Research},
year={2023},
publisher={SAGE Publications Sage UK: London, England}
}
@inproceedings{fang2020graspnet,
title={GraspNet-1Billion: A Large-Scale Benchmark for General Object Grasping},
author={Fang, Hao-Shu and Wang, Chenxi and Gou, Minghao and Lu, Cewu},
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
pages={11444--11453},
year={2020}
}
```
## Change Log
#### 1.2.6
- Add transformation for Grasp and GraspGroup.
#### 1.2.7
- Add inpainting for depth image.
#### 1.2.8
- Fix a minor bug in loadScenePointCloud.

View File

@@ -0,0 +1,19 @@
import os
from tqdm import tqdm

### change the root to your path ####
graspnet_root = '/home/gmh/graspnet'

### change the root to the folder that contains the rectangle grasp labels ###
rect_labels_root = 'rect_labels'

for sceneId in tqdm(range(190), 'Copying Rectangle Grasp Labels'):
    for camera in ['kinect', 'realsense']:
        dest_dir = os.path.join(graspnet_root, 'scenes', 'scene_%04d' % sceneId, camera, 'rect')
        src_dir = os.path.join(rect_labels_root, 'scene_%04d' % sceneId, camera)
        if not os.path.exists(dest_dir):
            os.mkdir(dest_dir)
        for annId in range(256):
            src_path = os.path.join(src_dir, '%04d.npy' % annId)
            assert os.path.exists(src_path)
            os.system('cp {} {}'.format(src_path, dest_dir))

View File

@@ -0,0 +1 @@
/build/

View File

@@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

View File

@@ -0,0 +1,8 @@
rm source/graspnetAPI.*
rm source/modules.rst
sphinx-apidoc -o ./source ../graspnetAPI
make clean
make html
make latex
cd build/latex
make

View File

@@ -0,0 +1,35 @@
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd

(18 binary image files added, not shown; sizes range from 18 KiB to 681 KiB.)

View File

@@ -0,0 +1,28 @@
About graspnetAPI
=================
.. image:: _static/graspnetlogo1-blue.png
GraspNet is an open project for general object grasping that is continuously enriched. Currently we release GraspNet-1Billion, a large-scale benchmark for general object grasping, together with annotations for related areas (e.g. 6D pose estimation, unseen object segmentation, etc.). graspnetAPI is a Python API that assists in loading, parsing and visualizing the annotations in GraspNet. Please visit the GraspNet website_ for more information, including the data, paper, and tutorials. The exact format of the annotations is also described on the GraspNet website. In addition to this API, please download both the GraspNet images and annotations in order to run the demo.
.. _website: https://graspnet.net/
Resources
---------
- Documentations_
- PDF_Documentations_
- Website_
- Code_
.. _Code: https://github.com/graspnet/graspnetapi
.. _Documentations: https://graspnetapi.readthedocs.io/en/latest/
.. _PDF_Documentations: https://graspnetapi.readthedocs.io/_/downloads/en/latest/pdf/
.. _Website: https://graspnet.net/
License
-------
graspnetAPI is licensed under the non-commercial CC 4.0 license [see https://graspnet.net/about].

View File

@@ -0,0 +1,58 @@
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../graspnetAPI'))
# -- Project information -----------------------------------------------------
project = 'graspnetAPI'
copyright = '2021, MVIG, Shanghai Jiao Tong University'
author = 'graspnet'
# The full version, including alpha/beta/rc tags
release = '1.2.11'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
master_doc = 'index'

View File

@@ -0,0 +1,8 @@
.. _example_check_data:
Check Dataset Files
===================
You can check whether any files are missing from the dataset with the following code.
.. literalinclude:: ../../examples/exam_check_data.py

View File

@@ -0,0 +1,62 @@
.. _example_convert:
Convert Labels between rectangle format and 6d format
=====================================================
Get a GraspNet instance.
.. literalinclude:: ../../examples/exam_convert.py
:lines: 4-22
Convert rectangle format to 6d format
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
First, load rectangle labels from dataset.
.. literalinclude:: ../../examples/exam_convert.py
:lines: 24-25
**Convert a single RectGrasp to Grasp.**
.. note:: This conversion may fail due to invalid depth information.
.. literalinclude:: ../../examples/exam_convert.py
:lines: 27-42
Before Conversion:
.. image:: _static/convert_single_before.png
After Conversion:
.. image:: _static/convert_single_after.png
**Convert RectGraspGroup to GraspGroup.**
.. literalinclude:: ../../examples/exam_convert.py
:lines: 44-56
Before Conversion:
.. image:: _static/convert_rect_before.png
After Conversion:
.. image:: _static/convert_rect_after.png
Convert 6d format to rectangle format
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note:: Grasp to RectGrasp conversion is not applicable, as only very few 6D grasps can be converted to rectangle grasps.
.. literalinclude:: ../../examples/exam_convert.py
:lines: 58-
Before Conversion:
.. image:: _static/convert_6d_before.png
After Conversion:
.. image:: _static/convert_6d_after.png

View File

@@ -0,0 +1,73 @@
.. _example_eval:
Evaluation
==========
Data Preparation
^^^^^^^^^^^^^^^^
The first step of evaluation is to prepare your own results.
You need to run your code and generate a `GraspGroup` for each image in each scene.
Then call the `save_npy` function of `GraspGroup` to dump the results.
To generate a `GraspGroup` and save it, you can directly input a 2D numpy array for the `GraspGroup` class:
::
    gg = GraspGroup(np.array([[score_1, width_1, height_1, depth_1, rotation_matrix_1(9), translation_1(3), object_id_1],
                              [score_2, width_2, height_2, depth_2, rotation_matrix_2(9), translation_2(3), object_id_2],
                              ...,
                              [score_N, width_N, height_N, depth_N, rotation_matrix_N(9), translation_N(3), object_id_N]]
                              ))
    gg.save_npy(save_path)
where your algorithm predicts N grasp poses for the image. For `object_id`, you can simply input `0`. For the meaning of the other entries, refer to the API Loaded Labels part of the Grasp Label Format documentation.
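As a concrete illustration, the following minimal sketch builds a dump file containing a single grasp with an identity rotation matrix; the numeric values are placeholders, not meaningful predictions.
::

    import numpy as np
    from graspnetAPI import GraspGroup

    score, width, height, depth = 0.9, 0.08, 0.02, 0.02
    rotation_matrix = np.eye(3).reshape(9)     # 9 numbers, row-major
    translation = np.array([0.0, 0.0, 0.4])    # 3 numbers
    object_id = 0                              # 0 is fine for evaluation

    grasp_array = np.concatenate([[score, width, height, depth], rotation_matrix, translation, [object_id]])
    gg = GraspGroup(grasp_array[np.newaxis, :])   # shape (1, 17)
    gg.save_npy('0000.npy')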
The file structure of the dump folder should be as follows:
::
|-- dump_folder
|-- scene_0100
| |-- kinect
| | |
| | --- 0000.npy to 0255.npy
| |
| --- realsense
| |
| --- 0000.npy to 0255.npy
|
|-- scene_0101
|
...
|
--- scene_0189
You can choose to generate dump files for only one camera; doing so will not cause an error.
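The sketch below shows one way to fill that structure; ``predict_grasp_group`` stands in for your own method and is not part of graspnetAPI.
::

    import os

    dump_folder = 'dump_folder'
    camera = 'kinect'                   # dumping results for a single camera is allowed
    for sceneId in range(100, 190):     # test scenes
        save_dir = os.path.join(dump_folder, 'scene_%04d' % sceneId, camera)
        os.makedirs(save_dir, exist_ok=True)
        for annId in range(256):
            gg = predict_grasp_group(sceneId, camera, annId)   # hypothetical: your algorithm
            gg.save_npy(os.path.join(save_dir, '%04d.npy' % annId))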
Evaluation API
^^^^^^^^^^^^^^
Get GraspNetEval instances.
.. literalinclude:: ../../examples/exam_eval.py
:lines: 4-17
Evaluate A Single Scene
-----------------------
.. literalinclude:: ../../examples/exam_eval.py
:lines: 19-23
Evaluate All Scenes
-------------------
.. literalinclude:: ../../examples/exam_eval.py
:lines: 25-27
Evaluate 'Seen' Split
---------------------
.. literalinclude:: ../../examples/exam_eval.py
:lines: 29-31

View File

@@ -0,0 +1,26 @@
.. _example_generate_rectangle_labels:
Generating Rectangle Grasp Labels
=================================
You can generate the rectangle grasp labels by yourself.
Import necessary libs:
.. literalinclude:: ../../examples/exam_generate_rectangle_grasp.py
:lines: 4-11
Set up the number of processes to use for generating the labels.
.. literalinclude:: ../../examples/exam_generate_rectangle_grasp.py
:lines: 13-15
The function to generate labels.
.. literalinclude:: ../../examples/exam_generate_rectangle_grasp.py
:lines: 17-31
Run the function for each scene and camera.
.. literalinclude:: ../../examples/exam_generate_rectangle_grasp.py
:lines: 33-52

View File

@@ -0,0 +1,30 @@
.. _example_loadGrasp:
Loading Grasp Labels
====================
Both `6d` and `rect` format labels can be loaded.
First, import the relevant libs.
.. literalinclude:: ../../examples/exam_loadGrasp.py
:lines: 4-7
Then, get a GraspNet instance and setup parameters.
.. literalinclude:: ../../examples/exam_loadGrasp.py
:lines: 11-19
Load grasp labels in `6d` format and visualize the result.
.. literalinclude:: ../../examples/exam_loadGrasp.py
:lines: 21-29
.. image:: _static/6d_example.png
Load grasp labels in `rect` format and visualize the result.
.. literalinclude:: ../../examples/exam_loadGrasp.py
:lines: 31-40
.. image:: _static/rect_example.png

View File

@@ -0,0 +1,112 @@
.. _example_nms:
Apply NMS on Grasps
===================
Get a GraspNet instance.
.. literalinclude:: ../../examples/exam_nms.py
:lines: 4-19
Load and visualize grasp labels before NMS.
.. literalinclude:: ../../examples/exam_nms.py
:lines: 21-29
::
6d grasp:
----------
Grasp Group, Number=90332:
Grasp: score:0.9000000357627869, width:0.11247877031564713, height:0.019999999552965164, depth:0.029999999329447746, translation:[-0.09166837 -0.16910084 0.39480919]
rotation:
[[-0.81045675 -0.57493848 0.11227506]
[ 0.49874267 -0.77775514 -0.38256073]
[ 0.30727136 -0.25405255 0.91708326]]
object id:66
Grasp: score:0.9000000357627869, width:0.10030215978622437, height:0.019999999552965164, depth:0.019999999552965164, translation:[-0.09166837 -0.16910084 0.39480919]
rotation:
[[-0.73440629 -0.67870212 0.0033038 ]
[ 0.64608938 -0.70059127 -0.3028869 ]
[ 0.20788456 -0.22030747 0.95302087]]
object id:66
Grasp: score:0.9000000357627869, width:0.08487851172685623, height:0.019999999552965164, depth:0.019999999552965164, translation:[-0.10412319 -0.13797761 0.38312319]
rotation:
[[ 0.03316294 0.78667939 -0.61647028]
[-0.47164679 0.55612743 0.68430364]
[ 0.88116372 0.26806271 0.38947764]]
object id:66
......
Grasp: score:0.9000000357627869, width:0.11909123510122299, height:0.019999999552965164, depth:0.019999999552965164, translation:[-0.05140382 0.11790846 0.48782501]
rotation:
[[-0.71453273 0.63476181 -0.2941435 ]
[-0.07400083 0.3495101 0.93400562]
[ 0.69567728 0.68914449 -0.20276351]]
object id:14
Grasp: score:0.9000000357627869, width:0.10943549126386642, height:0.019999999552965164, depth:0.019999999552965164, translation:[-0.05140382 0.11790846 0.48782501]
rotation:
[[ 0.08162415 0.4604325 -0.88393396]
[-0.52200603 0.77526748 0.3556262 ]
[ 0.84902728 0.4323912 0.30362913]]
object id:14
Grasp: score:0.9000000357627869, width:0.11654743552207947, height:0.019999999552965164, depth:0.009999999776482582, translation:[-0.05140382 0.11790846 0.48782501]
rotation:
[[-0.18380146 0.39686993 -0.89928377]
[-0.61254776 0.66926688 0.42055583]
[ 0.76876676 0.62815309 0.12008961]]
object id:14
------------
.. image:: _static/before_nms.png
Apply NMS to the GraspGroup and visualize the result.
.. literalinclude:: ../../examples/exam_nms.py
:lines: 31-38
::
grasp after nms:
----------
Grasp Group, Number=358:
Grasp: score:1.0, width:0.11948642134666443, height:0.019999999552965164, depth:0.03999999910593033, translation:[-0.00363996 0.03692623 0.3311775 ]
rotation:
[[ 0.32641056 -0.8457799 0.42203382]
[-0.68102902 -0.52005678 -0.51550031]
[ 0.65548128 -0.11915252 -0.74575269]]
object id:0
Grasp: score:1.0, width:0.12185929715633392, height:0.019999999552965164, depth:0.009999999776482582, translation:[-0.03486454 0.08384828 0.35117128]
rotation:
[[-0.00487804 -0.8475557 0.53068405]
[-0.27290785 -0.50941664 -0.81609803]
[ 0.96202785 -0.14880882 -0.22881967]]
object id:0
Grasp: score:1.0, width:0.04842342436313629, height:0.019999999552965164, depth:0.019999999552965164, translation:[0.10816982 0.10254505 0.50272578]
rotation:
[[-0.98109186 -0.01696888 -0.19279723]
[-0.1817532 0.42313483 0.88765001]
[ 0.06651681 0.90590769 -0.41821831]]
object id:20
......
Grasp: score:0.9000000357627869, width:0.006192661356180906, height:0.019999999552965164, depth:0.009999999776482582, translation:[0.0122985 0.29616502 0.53319722]
rotation:
[[-0.26423979 0.39734706 0.87880182]
[-0.95826042 -0.00504095 -0.28585231]
[-0.10915259 -0.91765451 0.38209397]]
object id:46
Grasp: score:0.9000000357627869, width:0.024634981527924538, height:0.019999999552965164, depth:0.009999999776482582, translation:[0.11430283 0.18761221 0.51991153]
rotation:
[[-0.17379239 -0.96953499 0.17262182]
[-0.9434278 0.11365268 -0.31149188]
[ 0.28238329 -0.2169912 -0.93443805]]
object id:70
Grasp: score:0.9000000357627869, width:0.03459500893950462, height:0.019999999552965164, depth:0.009999999776482582, translation:[0.02079188 0.11184558 0.50796509]
rotation:
[[ 0.38108557 -0.27480939 0.88275337]
[-0.92043257 -0.20266907 0.33425891]
[ 0.08704928 -0.93989623 -0.33017775]]
object id:20
----------
.. image:: _static/after_nms.png

View File

@@ -0,0 +1,39 @@
.. _example_vis:
Visualization of Dataset
========================
Get a GraspNet instance.
.. literalinclude:: ../../examples/exam_vis.py
:lines: 7-14
Show grasp labels on an object.
.. literalinclude:: ../../examples/exam_vis.py
:lines: 16-17
.. image:: _static/vis_single.png
Show 6D poses of objects in a scene.
.. literalinclude:: ../../examples/exam_vis.py
:lines: 19-20
.. image:: _static/vis_6d.png
Show Rectangle grasp labels in a scene.
.. literalinclude:: ../../examples/exam_vis.py
:lines: 22-23
.. image:: _static/vis_rect.png
Show 6D grasp labels in a scene.
.. literalinclude:: ../../examples/exam_vis.py
:lines: 25-26
.. image:: _static/vis_grasp.png

View File

@@ -0,0 +1,178 @@
.. _grasp_format:
Grasp Label Format
==================
Raw Label Format
----------------
The raw label is composed of two parts, i.e. labels for all grasp candidates on each object and collision masks for each scene.
Labels on Objects
^^^^^^^^^^^^^^^^^
The raw label on each object is a list of numpy arrays.
::
>>> import numpy as np
>>> l = np.load('000_labels.npz') # GRASPNET_ROOT/grasp_label/000_labels.npz
>>> l.files
['points', 'offsets', 'collision', 'scores']
>>> l['points'].shape
(3459, 3)
>>> l['offsets'].shape
(3459, 300, 12, 4, 3)
>>> l['collision'].shape
(3459, 300, 12, 4)
>>> l['collision'].dtype
dtype('bool')
>>> l['scores'].shape
(3459, 300, 12, 4)
>>> l['scores'][0][0][0][0]
-1.0
- 'points' records the grasp center point coordinates in model frame.
- 'offsets' records the in-plane rotation, depth and width of the gripper respectively in the last dimension.
- 'collision' records the boolean mask indicating whether the grasp pose collides with the model.
- 'scores' records the minimum coefficient of friction between the gripper and object to achieve a stable grasp.
.. note::
In the raw label, the **lower** the score, the **better** the grasp. However, a score of -1.0 means the grasp pose is not acceptable at all.
The dimensions 300, 12 and 4 denote the view id, in-plane rotation id and depth id respectively. The views are defined in graspnetAPI/utils/utils.py.
.. literalinclude:: ../../graspnetAPI/utils/utils.py
:lines: 51-58
:linenos:
Collision Masks on Each Scene
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Collision mask on each scene is a list of numpy arrays.
::
>>> import numpy as np
>>> c = np.load('collision_labels.npz') # GRASPNET_ROOT/collision_label/scene_0000/collision_labels.npz
>>> c.files
['arr_0', 'arr_4', 'arr_5', 'arr_2', 'arr_3', 'arr_7', 'arr_1', 'arr_8', 'arr_6']
>>> c['arr_0'].shape
(487, 300, 12, 4)
>>> c['arr_0'].dtype
dtype('bool')
>>> c['arr_0'][10][20][3]
array([ True, True, True, True])
'arr_i' is the collision mask of shape (num_points, 300, 12, 4) for the `i`-th object listed in each scene's `object_id_list.txt`.
num_points, 300, 12, 4 denote the number of points in the object, view id, in-plane rotation id and depth id respectively.
Users can refer to :py:func:`graspnetAPI.GraspNet.loadGrasp` for more details of how to use the labels.
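A minimal sketch of combining the two kinds of raw labels is given below. It assumes that object ``000`` is the first entry (``arr_0``) in the scene's `object_id_list.txt`; :py:func:`graspnetAPI.GraspNet.loadGrasp` performs this kind of filtering for you.
::

    import numpy as np

    l = np.load('000_labels.npz')                    # object-level grasp labels
    c = np.load('collision_labels.npz')['arr_0']     # scene-level collision mask for that object

    scores = l['scores']                             # (num_points, 300, 12, 4)
    valid = (scores > 0) & (~l['collision']) & (~c)  # labelled, no model or scene collision
    fric = np.where(valid, scores, np.inf)
    best = np.unravel_index(np.argmin(fric), fric.shape)   # lowest friction coefficient is best
    in_plane_rotation, depth, width = l['offsets'][best]
    center = l['points'][best[0]]                    # grasp center in the model frame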
API Loaded Labels
-----------------
Dealing with the raw labels is time-consuming and requires familiarity with GraspNet, so the API also provides easy access to the labels.
By calling :py:func:`graspnetAPI.GraspNet.loadGrasp`, users can get all the positive grasp labels in a scene with their parameters and scores.
There are totally four kinds of data structures for loaded grasp labels: **Grasp**, **GraspGroup**, **RectGrasp** and **RectGraspGroup**.
The internal data format of each class is a numpy array which is more efficient than the Python list.
Their definitions are given in grasp.py.
Example Labels
^^^^^^^^^^^^^^
Before looking into the details, an example is given below.
Loading a GraspGroup instance.
.. literalinclude:: ../../examples/exam_grasp_format.py
:lines: 1-27
Users can access elements by index or slice.
.. literalinclude:: ../../examples/exam_grasp_format.py
:lines: 29-35
Each element of GraspGroup is a Grasp instance.
The properties of Grasp can be accessed via provided methods.
.. literalinclude:: ../../examples/exam_grasp_format.py
:lines: 37-46
RectGrasp is the class for rectangle grasps. Its format is different from Grasp, but the provided APIs are similar.
.. literalinclude:: ../../examples/exam_grasp_format.py
:lines: 49-65
6D Grasp
^^^^^^^^
In total, 17 float numbers are used to define a general 6D grasp: the score, width, height, depth, rotation matrix (9 numbers), translation (3 numbers) and the attached object id.
.. note::
In the loaded label, the **higher** the score, the **better** the grasp, which is the opposite of the raw labels. Concretely, score = 1.1 - raw_score, where raw_score is the friction coefficient.
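For example, a grasp stored with a raw friction coefficient of 0.4 is loaded with score 1.1 - 0.4 = 0.7; a raw value of 0.1 is loaded as 1.0.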
.. literalinclude:: ../../graspnetAPI/graspnet.py
:lines: 635-637
:emphasize-lines: 2
The detailed definition of each parameter is shown in the figure below.
.. image:: _static/grasp_definition.png
.. literalinclude:: ../../graspnetAPI/grasp.py
:lines: 14-36
6D Grasp Group
^^^^^^^^^^^^^^
Usually, there are a lot of grasps in a scene, :py:class:`GraspGroup` is a class for these grasps.
Compared with :py:class:`Grasp`, :py:class:`GraspGroup` contains a 2D numpy array, the additional dimension is the index for each grasp.
.. literalinclude:: ../../graspnetAPI/grasp.py
:lines: 201-218
Common list operations such as indexing, slicing and sorting are implemented.
In addition, users can **dump** a GraspGroup to a numpy file and **load** it in another program by calling :py:func:`GraspGroup.save_npy` and :py:func:`GraspGroup.from_npy`.
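A short sketch of the dump/load round trip (assuming ``gg`` is an existing GraspGroup; the file name is arbitrary):
::

    gg.save_npy('my_grasps.npy')          # dump to disk

    # ... later, possibly in another program ...
    from graspnetAPI import GraspGroup
    loaded_gg = GraspGroup()
    loaded_gg.from_npy('my_grasps.npy')   # load the same grasps back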
Rectangle Grasp
^^^^^^^^^^^^^^^
7 float numbers are used to define a general rectangle grasp: the center point (2 numbers), the open point (2 numbers), height, score and the attached object id.
The detailed definition of each parameter is shown in the figures above and below; the coordinates of the center point and the open point are in the pixel frame.
.. image:: _static/rect_grasp_definition.png
.. literalinclude:: ../../graspnetAPI/grasp.py
:lines: 553-572
Rectangle Grasp Group
^^^^^^^^^^^^^^^^^^^^^
The format for :py:class:`RectGraspGroup` is similar to that of :py:class:`RectGrasp` and :py:class:`GraspGroup`.
.. literalinclude:: ../../graspnetAPI/grasp.py
:lines: 752-769
.. note::
We recommend accessing and modifying the labels through the provided functions; users can also manipulate the underlying data directly, but this is **Not Recommended**.
Please refer to the Python API for more details.
Grasp and GraspGroup Transformation
-----------------------------------
Users can transform a Grasp or GraspGroup by giving a 4x4 transformation matrix.
.. literalinclude:: ../../examples/exam_grasp_format.py
:lines: 67-95
.. image:: _static/transformation.png

View File

@@ -0,0 +1,46 @@
graspnetAPI package
===================
Subpackages
-----------
.. toctree::
:maxdepth: 4
graspnetAPI.utils
Submodules
----------
graspnetAPI.grasp module
------------------------
.. automodule:: graspnetAPI.grasp
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.graspnet module
---------------------------
.. automodule:: graspnetAPI.graspnet
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.graspnet\_eval module
---------------------------------
.. automodule:: graspnetAPI.graspnet_eval
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: graspnetAPI
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,54 @@
graspnetAPI.utils.dexnet.grasping.meshpy package
================================================
Submodules
----------
graspnetAPI.utils.dexnet.grasping.meshpy.mesh module
----------------------------------------------------
.. automodule:: graspnetAPI.utils.dexnet.grasping.meshpy.mesh
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.dexnet.grasping.meshpy.obj\_file module
---------------------------------------------------------
.. automodule:: graspnetAPI.utils.dexnet.grasping.meshpy.obj_file
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.dexnet.grasping.meshpy.sdf module
---------------------------------------------------
.. automodule:: graspnetAPI.utils.dexnet.grasping.meshpy.sdf
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.dexnet.grasping.meshpy.sdf\_file module
---------------------------------------------------------
.. automodule:: graspnetAPI.utils.dexnet.grasping.meshpy.sdf_file
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.dexnet.grasping.meshpy.stable\_pose module
------------------------------------------------------------
.. automodule:: graspnetAPI.utils.dexnet.grasping.meshpy.stable_pose
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: graspnetAPI.utils.dexnet.grasping.meshpy
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,70 @@
graspnetAPI.utils.dexnet.grasping package
=========================================
Subpackages
-----------
.. toctree::
:maxdepth: 4
graspnetAPI.utils.dexnet.grasping.meshpy
Submodules
----------
graspnetAPI.utils.dexnet.grasping.contacts module
-------------------------------------------------
.. automodule:: graspnetAPI.utils.dexnet.grasping.contacts
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.dexnet.grasping.grasp module
----------------------------------------------
.. automodule:: graspnetAPI.utils.dexnet.grasping.grasp
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.dexnet.grasping.grasp\_quality\_config module
---------------------------------------------------------------
.. automodule:: graspnetAPI.utils.dexnet.grasping.grasp_quality_config
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.dexnet.grasping.grasp\_quality\_function module
-----------------------------------------------------------------
.. automodule:: graspnetAPI.utils.dexnet.grasping.grasp_quality_function
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.dexnet.grasping.graspable\_object module
----------------------------------------------------------
.. automodule:: graspnetAPI.utils.dexnet.grasping.graspable_object
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.dexnet.grasping.quality module
------------------------------------------------
.. automodule:: graspnetAPI.utils.dexnet.grasping.quality
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: graspnetAPI.utils.dexnet.grasping
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,38 @@
graspnetAPI.utils.dexnet package
================================
Subpackages
-----------
.. toctree::
:maxdepth: 4
graspnetAPI.utils.dexnet.grasping
Submodules
----------
graspnetAPI.utils.dexnet.abstractstatic module
----------------------------------------------
.. automodule:: graspnetAPI.utils.dexnet.abstractstatic
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.dexnet.constants module
-----------------------------------------
.. automodule:: graspnetAPI.utils.dexnet.constants
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: graspnetAPI.utils.dexnet
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,86 @@
graspnetAPI.utils package
=========================
Subpackages
-----------
.. toctree::
:maxdepth: 4
graspnetAPI.utils.dexnet
Submodules
----------
graspnetAPI.utils.config module
-------------------------------
.. automodule:: graspnetAPI.utils.config
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.eval\_utils module
------------------------------------
.. automodule:: graspnetAPI.utils.eval_utils
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.pose module
-----------------------------
.. automodule:: graspnetAPI.utils.pose
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.rotation module
---------------------------------
.. automodule:: graspnetAPI.utils.rotation
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.trans3d module
--------------------------------
.. automodule:: graspnetAPI.utils.trans3d
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.utils module
------------------------------
.. automodule:: graspnetAPI.utils.utils
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.vis module
----------------------------
.. automodule:: graspnetAPI.utils.vis
:members:
:undoc-members:
:show-inheritance:
graspnetAPI.utils.xmlhandler module
-----------------------------------
.. automodule:: graspnetAPI.utils.xmlhandler
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: graspnetAPI.utils
:members:
:undoc-members:
:show-inheritance:

View File

@@ -0,0 +1,48 @@
.. graspnetAPI documentation master file, created by
sphinx-quickstart on Tue Nov 3 13:04:51 2020.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to graspnetAPI's documentation!
=======================================
.. toctree::
:maxdepth: 2
:caption: Contents:
about
install
grasp_format
Examples
=========
.. toctree::
:maxdepth: 1
:caption: Examples
example_check_data
example_generate_rectangle_labels
example_loadGrasp
example_vis
example_nms
example_convert
example_eval
Python API
==========
.. toctree::
:maxdepth: 1
:caption: Modules
graspnetAPI
graspnetAPI.utils
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View File

@@ -0,0 +1,61 @@
Installation
============
.. note::
Only Python 3 on Linux is supported.
Prerequisites
^^^^^^^^^^^^^
Python versions below 3.6 are not tested.
Dataset
^^^^^^^
Download
--------
Download the dataset at https://graspnet.net/datasets.html
Unzip
-----
Unzip the files as shown in https://graspnet.net/datasets.html.
Rectangle Grasp Labels
----------------------
Rectangle grasp labels are optional; you only need them if you want labels in this format.
You can either generate the labels yourself or download the file_.
If you want to generate the labels by yourself, you may refer to :ref:`example_generate_rectangle_labels`.
.. note::
Generating rectangle grasp labels may take a long time.
After generating or unzipping the labels, you need to run copy_rect_labels.py_ to copy the rectangle grasp labels into the corresponding scene folders.
.. _copy_rect_labels.py: https://github.com/graspnet/graspnetAPI/blob/master/copy_rect_labels.py
.. _file: https://graspnet.net/datasets.html
Dexnet Model Cache
------------------
The Dexnet model cache is optional; without it, evaluation will be much slower (about 10x slower).
You can either download the file or generate it yourself by running gen_pickle_dexmodel.py_ (recommended).
.. _gen_pickle_dexmodel.py: https://github.com/graspnet/graspnetAPI/blob/master/gen_pickle_dexmodel.py
Install API
^^^^^^^^^^^
You may install using pip::
pip install graspnetAPI
You can also install from source::
git clone https://github.com/graspnet/graspnetAPI.git
cd graspnetAPI/
pip install .

View File

@@ -0,0 +1,7 @@
graspnetAPI
===========
.. toctree::
:maxdepth: 4
graspnetAPI

View File

@@ -0,0 +1,22 @@
__author__ = 'mhgou'
__version__ = '1.0'

from graspnetAPI import GraspNet

# GraspNetAPI example for checking the data completeness.
# change the graspnet_root path

if __name__ == '__main__':

    ####################################################################
    graspnet_root = '/home/gmh/graspnet' ### ROOT PATH FOR GRASPNET ###
    ####################################################################

    g = GraspNet(graspnet_root, 'kinect', 'all')
    if g.checkDataCompleteness():
        print('Check for kinect passed')

    g = GraspNet(graspnet_root, 'realsense', 'all')
    if g.checkDataCompleteness():
        print('Check for realsense passed')

View File

@@ -0,0 +1,76 @@
__author__ = 'mhgou'
__version__ = '1.0'
from graspnetAPI import GraspNet
import cv2
import open3d as o3d
# GraspNetAPI example for converting between rectangle grasps and 6d grasps.
# change the graspnet_root path
camera = 'kinect'
sceneId = 5
annId = 3
####################################################################
graspnet_root = '/home/gmh/graspnet' # ROOT PATH FOR GRASPNET
####################################################################
g = GraspNet(graspnet_root, camera = camera, split = 'all')
bgr = g.loadBGR(sceneId = sceneId, camera = camera, annId = annId)
depth = g.loadDepth(sceneId = sceneId, camera = camera, annId = annId)
# Rect to 6d
rect_grasp_group = g.loadGrasp(sceneId = sceneId, camera = camera, annId = annId, fric_coef_thresh = 0.2, format = 'rect')
# RectGrasp to Grasp
rect_grasp = rect_grasp_group.random_sample(1)[0]
img = rect_grasp.to_opencv_image(bgr)
cv2.imshow('rect grasp', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
grasp = rect_grasp.to_grasp(camera, depth)
if grasp is not None:
    geometry = []
    geometry.append(g.loadScenePointCloud(sceneId, camera, annId))
    geometry.append(grasp.to_open3d_geometry())
    o3d.visualization.draw_geometries(geometry)
else:
    print('No result because the depth is invalid, please try again!')
# RectGraspGroup to GraspGroup
sample_rect_grasp_group = rect_grasp_group.random_sample(20)
img = sample_rect_grasp_group.to_opencv_image(bgr)
cv2.imshow('rect grasp', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
grasp_group = sample_rect_grasp_group.to_grasp_group(camera, depth)
if grasp_group is not None:
    geometry = []
    geometry.append(g.loadScenePointCloud(sceneId, camera, annId))
    geometry += grasp_group.to_open3d_geometry_list()
    o3d.visualization.draw_geometries(geometry)
# 6d to Rect
_6d_grasp_group = g.loadGrasp(sceneId = sceneId, camera = camera, annId = annId, fric_coef_thresh = 0.2, format = '6d')
# Grasp to RectGrasp conversion is not applicable as only very few 6d grasp can be converted to rectangle grasp.
# GraspGroup to RectGraspGroup
sample_6d_grasp_group = _6d_grasp_group.random_sample(20)
geometry = []
geometry.append(g.loadScenePointCloud(sceneId, camera, annId))
geometry += sample_6d_grasp_group.to_open3d_geometry_list()
o3d.visualization.draw_geometries(geometry)
rect_grasp_group = _6d_grasp_group.to_rect_grasp_group(camera)
img = rect_grasp_group.to_opencv_image(bgr)
cv2.imshow('rect grasps', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

View File

@@ -0,0 +1,31 @@
__author__ = 'mhgou'
__version__ = '1.0'
# GraspNetAPI example for evaluating grasps in a scene.
# change the graspnet_root path
import numpy as np
from graspnetAPI import GraspNetEval
####################################################################
graspnet_root = '/home/gmh/graspnet' # ROOT PATH FOR GRASPNET
dump_folder = '/home/gmh/git/rgbd_graspnet/dump_affordance_iounan/' # ROOT PATH FOR DUMP
####################################################################
sceneId = 121
camera = 'kinect'
ge_k = GraspNetEval(root = graspnet_root, camera = 'kinect', split = 'test')
ge_r = GraspNetEval(root = graspnet_root, camera = 'realsense', split = 'test')
# eval a single scene
print('Evaluating scene:{}, camera:{}'.format(sceneId, camera))
acc = ge_k.eval_scene(scene_id = sceneId, dump_folder = dump_folder)
np_acc = np.array(acc)
print('mean accuracy:{}'.format(np.mean(np_acc)))
# # eval all data for kinect
# print('Evaluating kinect')
# res, ap = ge_k.eval_all(dump_folder, proc = 24)
# # eval 'seen' split for realsense
# print('Evaluating realsense')
# res, ap = ge_r.eval_seen(dump_folder, proc = 24)

View File

@@ -0,0 +1,52 @@
__author__ = 'mhgou'
__version__ = '1.0'
# GraspNetAPI example for generating rectangle grasp from 6d grasp.
# change the graspnet_root path and NUM_PROCESS
from graspnetAPI import GraspNet
from graspnetAPI.graspnet import TOTAL_SCENE_NUM
import os
import numpy as np
from tqdm import tqdm
######################################################################
NUM_PROCESS = 24 # change NUM_PROCESS to the number of cores to use. #
######################################################################
def generate_scene_rectangle_grasp(sceneId, dump_folder, camera):
    g = GraspNet(graspnet_root, camera=camera, split='all')
    objIds = g.getObjIds(sceneIds = sceneId)
    grasp_labels = g.loadGraspLabels(objIds)
    collision_labels = g.loadCollisionLabels(sceneIds = sceneId)
    scene_dir = os.path.join(dump_folder,'scene_%04d' % sceneId)
    if not os.path.exists(scene_dir):
        os.mkdir(scene_dir)
    camera_dir = os.path.join(scene_dir, camera)
    if not os.path.exists(camera_dir):
        os.mkdir(camera_dir)
    for annId in tqdm(range(256), 'Scene:{}, Camera:{}'.format(sceneId, camera)):
        _6d_grasp = g.loadGrasp(sceneId = sceneId, annId = annId, format = '6d', camera = camera, grasp_labels = grasp_labels, collision_labels = collision_labels, fric_coef_thresh = 1.0)
        rect_grasp_group = _6d_grasp.to_rect_grasp_group(camera)
        rect_grasp_group.save_npy(os.path.join(camera_dir, '%04d.npy' % annId))
if __name__ == '__main__':
    ####################################################################
    graspnet_root = '/home/minghao/graspnet' # ROOT PATH FOR GRASPNET ##
    ####################################################################
    dump_folder = 'rect_labels'
    if not os.path.exists(dump_folder):
        os.mkdir(dump_folder)
    if NUM_PROCESS > 1:
        from multiprocessing import Pool
        pool = Pool(NUM_PROCESS)
        for camera in ['realsense', 'kinect']:
            for sceneId in range(120):
                pool.apply_async(func = generate_scene_rectangle_grasp, args = (sceneId, dump_folder, camera))
        pool.close()
        pool.join()
    else:
        for camera in ['realsense', 'kinect']:
            for sceneId in range(120):
                generate_scene_rectangle_grasp(sceneId, dump_folder, camera)

View File

@@ -0,0 +1,95 @@
__author__ = 'mhgou'
__version__ = '1.0'
from graspnetAPI import GraspNet, Grasp, GraspGroup
import open3d as o3d
import cv2
import numpy as np
# GraspNetAPI example for loading grasp for a scene.
# change the graspnet_root path
####################################################################
graspnet_root = '/disk1/graspnet' # ROOT PATH FOR GRASPNET
####################################################################
sceneId = 1
annId = 3
# initialize a GraspNet instance
g = GraspNet(graspnet_root, camera='kinect', split='train')
# load grasps of scene 1 with annotation id = 3, camera = kinect and fric_coef_thresh = 0.2
_6d_grasp = g.loadGrasp(sceneId = sceneId, annId = annId, format = '6d', camera = 'kinect', fric_coef_thresh = 0.2)
print('6d grasp:\n{}'.format(_6d_grasp))
# _6d_grasp is an GraspGroup instance defined in grasp.py
print('_6d_grasp:\n{}'.format(_6d_grasp))
# index
grasp = _6d_grasp[0]
print('_6d_grasp[0](grasp):\n{}'.format(grasp))
# slice
print('_6d_grasp[0:2]:\n{}'.format(_6d_grasp[0:2]))
print('_6d_grasp[[0,1]]:\n{}'.format(_6d_grasp[[0,1]]))
# grasp is a Grasp instance defined in grasp.py
# access and set properties
print('grasp.translation={}'.format(grasp.translation))
grasp.translation = np.array([1.0, 2.0, 3.0])
print('After modification, grasp.translation={}'.format(grasp.translation))
print('grasp.rotation_matrix={}'.format(grasp.rotation_matrix))
grasp.rotation_matrix = np.eye(3).reshape((9))
print('After modification, grasp.rotation_matrix={}'.format(grasp.rotation_matrix))
print('grasp.width={}, height:{}, depth:{}, score:{}'.format(grasp.width, grasp.height, grasp.depth, grasp.score))
print('More operation on Grasp and GraspGroup can be seen in the API document')
# load rectangle grasps of scene 1 with annotation id = 3, camera = realsense and fric_coef_thresh = 0.2
rect_grasp_group = g.loadGrasp(sceneId = sceneId, annId = annId, format = 'rect', camera = 'realsense', fric_coef_thresh = 0.2)
print('rectangle grasp group:\n{}'.format(rect_grasp_group))
# rect_grasp is an RectGraspGroup instance defined in grasp.py
print('rect_grasp_group:\n{}'.format(rect_grasp_group))
# index
rect_grasp = rect_grasp_group[0]
print('rect_grasp_group[0](rect_grasp):\n{}'.format(rect_grasp))
# slice
print('rect_grasp_group[0:2]:\n{}'.format(rect_grasp_group[0:2]))
print('rect_grasp_group[[0,1]]:\n{}'.format(rect_grasp_group[[0,1]]))
# properties of rect_grasp
print('rect_grasp.center_point:{}, open_point:{}, height:{}, score:{}'.format(rect_grasp.center_point, rect_grasp.open_point, rect_grasp.height, rect_grasp.score))
# transform grasp
g = Grasp() # simple Grasp
frame = o3d.geometry.TriangleMesh.create_coordinate_frame(0.1)
# Grasp before transformation
o3d.visualization.draw_geometries([g.to_open3d_geometry(), frame])
g.translation = np.array((0,0,0.01))
# setup a transformation matrix
T = np.eye(4)
T[:3,3] = np.array((0.01, 0.02, 0.03))
T[:3,:3] = np.array([[0,0,1.0],[1,0,0],[0,1,0]])
g.transform(T)
# Grasp after transformation
o3d.visualization.draw_geometries([g.to_open3d_geometry(), frame])
g1 = Grasp()
gg = GraspGroup()
gg.add(g)
gg.add(g1)
# GraspGroup before transformation
o3d.visualization.draw_geometries([*gg.to_open3d_geometry_list(), frame])
gg.transform(T)
# GraspGroup after transformation
o3d.visualization.draw_geometries([*gg.to_open3d_geometry_list(), frame])

View File

@@ -0,0 +1,40 @@
__author__ = 'mhgou'
__version__ = '1.0'
from graspnetAPI import GraspNet
import open3d as o3d
import cv2
# GraspNetAPI example for loading grasp for a scene.
# change the graspnet_root path
####################################################################
graspnet_root = '/mnt/h/AI/Datasets/graspnet-1billion/test_seen' # ROOT PATH FOR GRASPNET
####################################################################
sceneId = 100
annId = 3
# initialize a GraspNet instance
g = GraspNet(graspnet_root, camera='kinect', split='test_seen')
# load grasps of scene 100 with annotation id = 3, camera = kinect and fric_coef_thresh = 0.2
_6d_grasp = g.loadGrasp(sceneId = sceneId, annId = annId, format = '6d', camera = 'kinect', fric_coef_thresh = 0.2)
print('6d grasp:\n{}'.format(_6d_grasp))
# visualize the grasps using open3d
geometries = []
geometries.append(g.loadScenePointCloud(sceneId = sceneId, annId = annId, camera = 'kinect'))
geometries += _6d_grasp.random_sample(numGrasp = 20).to_open3d_geometry_list()
o3d.visualization.draw_geometries(geometries)
# load rectangle grasps of scene 100 with annotation id = 3, camera = realsense and fric_coef_thresh = 0.2
rect_grasp = g.loadGrasp(sceneId = sceneId, annId = annId, format = 'rect', camera = 'realsense', fric_coef_thresh = 0.2)
print('rectangle grasp:\n{}'.format(rect_grasp))
# visualize the rectangle grasps using opencv
bgr = g.loadBGR(sceneId = sceneId, annId = annId, camera = 'realsense')
img = rect_grasp.to_opencv_image(bgr, numGrasp = 20)
cv2.imshow('rectangle grasps', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

View File

@@ -0,0 +1,38 @@
__author__ = 'mhgou'
__version__ = '1.0'
# GraspNetAPI example for grasp nms.
# change the graspnet_root path
####################################################################
graspnet_root = '/home/gmh/graspnet' # ROOT PATH FOR GRASPNET
####################################################################
sceneId = 1
annId = 3
from graspnetAPI import GraspNet
import open3d as o3d
import cv2
# initialize a GraspNet instance
g = GraspNet(graspnet_root, camera='kinect', split='train')
# load grasps of scene 1 with annotation id = 3, camera = kinect and fric_coef_thresh = 0.2
_6d_grasp = g.loadGrasp(sceneId = sceneId, annId = annId, format = '6d', camera = 'kinect', fric_coef_thresh = 0.2)
print('6d grasp:\n{}'.format(_6d_grasp))
# visualize the grasps using open3d
geometries = []
geometries.append(g.loadScenePointCloud(sceneId = sceneId, annId = annId, camera = 'kinect'))
geometries += _6d_grasp.random_sample(numGrasp = 1000).to_open3d_geometry_list()
o3d.visualization.draw_geometries(geometries)
nms_grasp = _6d_grasp.nms(translation_thresh = 0.1, rotation_thresh = 30 / 180.0 * 3.1416)
print('grasp after nms:\n{}'.format(nms_grasp))
# visualize the grasps using open3d
geometries = []
geometries.append(g.loadScenePointCloud(sceneId = sceneId, annId = annId, camera = 'kinect'))
geometries += nms_grasp.to_open3d_geometry_list()
o3d.visualization.draw_geometries(geometries)

View File

@@ -0,0 +1,26 @@
__author__ = 'mhgou'
__version__ = '1.0'
# GraspNetAPI example for visualization.
# change the graspnet_root path
####################################################################
graspnet_root = '/mnt/h/AI/Datasets/graspnet-1billion/test_seen' # ROOT PATH FOR GRASPNET
####################################################################
from graspnetAPI import GraspNet
# initialize a GraspNet instance
g = GraspNet(graspnet_root, camera='kinect', split='test_seen')
# show object grasps
g.showObjGrasp(objIds = 0, show=True)
# show 6d poses
g.show6DPose(sceneIds = 0, show = True)
# show scene rectangle grasps
g.showSceneGrasp(sceneId = 0, camera = 'realsense', annId = 0, format = 'rect', numGrasp = 20)
# show scene 6d grasps (you may need to wait several minutes)
g.showSceneGrasp(sceneId = 4, camera = 'kinect', annId = 2, format = '6d')

View File

@@ -0,0 +1,21 @@
__author__ = 'mhgou'

from graspnetAPI.utils.eval_utils import load_dexnet_model
from tqdm import tqdm
import pickle
import os

##### Change the root to your path #####
graspnet_root = '/home/gmh/graspnet'

##### Do NOT change this folder name #####
dex_folder = 'dex_models'
if not os.path.exists(dex_folder):
    os.makedirs(dex_folder)

model_dir = os.path.join(graspnet_root, 'models')
for obj_id in tqdm(range(88), 'dump models'):
    dex_model = load_dexnet_model(os.path.join(model_dir, '%03d' % obj_id, 'textured'))
    with open(os.path.join(dex_folder, '%03d.pkl' % obj_id), 'wb') as f:
        pickle.dump(dex_model, f)

(2 binary files added, not shown; one is a 52 KiB image.)

View File

@@ -0,0 +1,6 @@
__author__ = 'mhgou'
__version__ = '1.2.11'
from .graspnet import GraspNet
from .graspnet_eval import GraspNetEval
from .grasp import Grasp, GraspGroup, RectGrasp, RectGraspGroup

File diff suppressed because it is too large.

View File

@@ -0,0 +1,801 @@
__author__ = 'hsfang, mhgou, cxwang'
# Interface for accessing the GraspNet-1Billion dataset.
# Description and part of the codes modified from MSCOCO api
# GraspNet is an open project for general object grasping that is continuously enriched.
# Currently we release GraspNet-1Billion, a large-scale benchmark for general object grasping,
# as well as other related areas (e.g. 6D pose estimation, unseen object segmentation, etc.).
# graspnetapi is a Python API that assists in loading, parsing and visualizing the
# annotations in GraspNet. Please visit https://graspnet.net/ for more information on GraspNet,
# including for the data, paper, and tutorials. The exact format of the annotations
# is also described on the GraspNet website. For example usage of the graspnetapi
# please see graspnetapi_demo.ipynb. In addition to this API, please download both
# the GraspNet images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *grasping* and *6d pose* annotations. In the case of
# 6d poses not all functions are defined (e.g. collisions are undefined).
# The following API functions are defined:
# GraspNet - GraspNet api class that loads GraspNet annotation file and prepare data structures.
# checkDataCompleteness- Check the file completeness of the dataset.
# getSceneIds - Get scene ids that satisfy given filter conditions.
# getObjIds - Get obj ids that satisfy given filter conditions.
# getDataIds - Get data ids that satisfy given filter conditions.
# loadBGR - Load image in BGR format.
# loadRGB - Load image in RGB format.
# loadDepth - Load depth image.
# loadMask - Load the segmentation masks.
# loadSceneModels - Load object models in a scene.
# loadScenePointCloud - Load point cloud constructed by the depth and color image.
# loadWorkSpace - Load the workspace bounding box.
# loadGraspLabels - Load grasp labels with the specified object ids.
# loadObjModels - Load object 3d mesh model with the specified object ids.
# loadObjTrimesh - Load object 3d mesh in Trimesh format.
# loadCollisionLabels - Load collision labels with the specified scene ids.
# loadGrasp - Load grasp labels with the specified scene and annotation id.
# loadData - Load data path with the specified data ids.
# showObjGrasp - Save visualization of the grasp pose of specified object ids.
# showSceneGrasp - Save visualization of the grasp pose of specified scene ids.
# show6DPose - Save visualization of the 6d pose of specified scene ids, project obj models onto pointcloud
# Throughout the API "ann"=annotation, "obj"=object, and "img"=image.
# GraspNet Toolbox. version 1.0
# Data, paper, and tutorials available at: https://graspnet.net/
# Code written by Hao-Shu Fang, Minghao Gou and Chenxi Wang, 2020.
# Licensed under the none commercial CC4.0 license [see https://graspnet.net/about]
import os
import numpy as np
from tqdm import tqdm
import open3d as o3d
import cv2
import trimesh
from .grasp import Grasp, GraspGroup, RectGrasp, RectGraspGroup, RECT_GRASP_ARRAY_LEN
from .utils.utils import transform_points, parse_posevector
from .utils.xmlhandler import xmlReader
TOTAL_SCENE_NUM = 190
GRASP_HEIGHT = 0.02
def _isArrayLike(obj):
    return hasattr(obj, '__iter__') and hasattr(obj, '__len__')
class GraspNet():
    def __init__(self, root, camera='kinect', split='train', sceneIds=[]):
        '''
        graspnetAPI main class.

        **input**:

        - camera: string of type of camera: "kinect" or "realsense"

        - split: string of type of split of dataset: "all", "train", "test", "test_seen", "test_similar", "test_novel" or "custom"

        - sceneIds: list of custom scene ids.
        '''
        assert camera in ['kinect', 'realsense'], 'camera should be kinect or realsense'
        assert split in ['all', 'train', 'test', 'test_seen', 'test_similar', 'test_novel', "custom"], 'split should be all/train/test/test_seen/test_similar/test_novel'
        self.root = root
        self.camera = camera
        self.split = split
        self.collisionLabels = {}

        if split == 'all':
            self.sceneIds = list(range(TOTAL_SCENE_NUM))
        elif split == 'train':
            self.sceneIds = list(range(100))
        elif split == 'test':
            self.sceneIds = list(range(100, 190))
        elif split == 'test_seen':
            self.sceneIds = list(range(100, 130))
        elif split == 'test_similar':
            self.sceneIds = list(range(130, 160))
        elif split == 'test_novel':
            self.sceneIds = list(range(160, 190))
        elif split == "custom":
            self.sceneIds = sceneIds

        self.rgbPath = []
        self.depthPath = []
        self.segLabelPath = []
        self.metaPath = []
        self.rectLabelPath = []
        self.sceneName = []
        self.annId = []

        for i in tqdm(self.sceneIds, desc='Loading data path...'):
            for img_num in range(256):
                self.rgbPath.append(os.path.join(
                    root, 'scenes', 'scene_'+str(i).zfill(4), camera, 'rgb', str(img_num).zfill(4)+'.png'))
                self.depthPath.append(os.path.join(
                    root, 'scenes', 'scene_'+str(i).zfill(4), camera, 'depth', str(img_num).zfill(4)+'.png'))
                self.segLabelPath.append(os.path.join(
                    root, 'scenes', 'scene_'+str(i).zfill(4), camera, 'label', str(img_num).zfill(4)+'.png'))
                self.metaPath.append(os.path.join(
                    root, 'scenes', 'scene_'+str(i).zfill(4), camera, 'meta', str(img_num).zfill(4)+'.mat'))
                self.rectLabelPath.append(os.path.join(
                    root, 'scenes', 'scene_'+str(i).zfill(4), camera, 'rect', str(img_num).zfill(4)+'.npy'))
                self.sceneName.append('scene_'+str(i).zfill(4))
                self.annId.append(img_num)

        self.objIds = self.getObjIds(self.sceneIds)

    def __len__(self):
        return len(self.depthPath)
def checkDataCompleteness(self):
'''
Check whether the dataset files are complete.
**Output:**
- bool, True for complete, False for not complete.
'''
error_flag = False
for obj_id in tqdm(range(88), 'Checking Models'):
if not os.path.exists(os.path.join(self.root, 'models','%03d' % obj_id, 'nontextured.ply')):
error_flag = True
print('No nontextured.ply For Object {}'.format(obj_id))
if not os.path.exists(os.path.join(self.root, 'models','%03d' % obj_id, 'textured.sdf')):
error_flag = True
print('No textured.sdf For Object {}'.format(obj_id))
if not os.path.exists(os.path.join(self.root, 'models','%03d' % obj_id, 'textured.obj')):
error_flag = True
print('No textured.obj For Object {}'.format(obj_id))
for obj_id in tqdm(range(88), 'Checking Grasp Labels'):
if not os.path.exists(os.path.join(self.root, 'grasp_label', '%03d_labels.npz' % obj_id)):
error_flag = True
print('No Grasp Label For Object {}'.format(obj_id))
for sceneId in tqdm(self.sceneIds, 'Checking Collosion Labels'):
if not os.path.exists(os.path.join(self.root, 'collision_label', 'scene_%04d' % sceneId, 'collision_labels.npz')):
error_flag = True
print('No Collision Labels For Scene {}'.format(sceneId))
for sceneId in tqdm(self.sceneIds, 'Checking Scene Datas'):
scene_dir = os.path.join(self.root, 'scenes', 'scene_%04d' % sceneId)
if not os.path.exists(os.path.join(scene_dir,'object_id_list.txt')):
error_flag = True
print('No Object Id List For Scene {}'.format(sceneId))
if not os.path.exists(os.path.join(scene_dir,'rs_wrt_kn.npy')):
error_flag = True
print('No rs_wrt_kn.npy For Scene {}'.format(sceneId))
for camera in [self.camera]:
camera_dir = os.path.join(scene_dir, camera)
if not os.path.exists(os.path.join(camera_dir,'cam0_wrt_table.npy')):
error_flag = True
print('No cam0_wrt_table.npy For Scene {}, Camera:{}'.format(sceneId, camera))
if not os.path.exists(os.path.join(camera_dir,'camera_poses.npy')):
error_flag = True
print('No camera_poses.npy For Scene {}, Camera:{}'.format(sceneId, camera))
if not os.path.exists(os.path.join(camera_dir,'camK.npy')):
error_flag = True
print('No camK.npy For Scene {}, Camera:{}'.format(sceneId, camera))
for annId in range(256):
if not os.path.exists(os.path.join(camera_dir,'rgb','%04d.png' % annId)):
error_flag = True
print('No RGB Image For Scene {}, Camera:{}, annotion:{}'.format(sceneId, camera, annId))
if not os.path.exists(os.path.join(camera_dir,'depth','%04d.png' % annId)):
error_flag = True
print('No Depth Image For Scene {}, Camera:{}, annotion:{}'.format(sceneId, camera, annId))
if not os.path.exists(os.path.join(camera_dir,'label','%04d.png' % annId)):
error_flag = True
print('No Mask Label image For Scene {}, Camera:{}, annotion:{}'.format(sceneId, camera, annId))
if not os.path.exists(os.path.join(camera_dir,'meta','%04d.mat' % annId)):
error_flag = True
print('No Meta Data For Scene {}, Camera:{}, annotion:{}'.format(sceneId, camera, annId))
if not os.path.exists(os.path.join(camera_dir,'annotations','%04d.xml' % annId)):
error_flag = True
print('No Annotations For Scene {}, Camera:{}, annotation:{}'.format(sceneId, camera, annId))
if not os.path.exists(os.path.join(camera_dir,'rect','%04d.npy' % annId)):
error_flag = True
print('No Rectangle Labels For Scene {}, Camera:{}, annotation:{}'.format(sceneId, camera, annId))
return not error_flag
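# Usage sketch: verifying a local copy of the dataset before training or evaluation.
# The root path is a placeholder and the package-level import is assumed to expose
# GraspNet as in the repository examples.
#
#     from graspnetAPI import GraspNet
#     g = GraspNet('/path/to/graspnet', camera='kinect', split='train')
#     if not g.checkDataCompleteness():
#         print('Some dataset files are missing.')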
def getSceneIds(self, objIds=None):
'''
**Input:**
- objIds: int or list of int of the object ids.
**Output:**
- a list of int of the scene ids that contains **all** the objects.
'''
if objIds is None:
return self.sceneIds
assert _isArrayLike(objIds) or isinstance(objIds, int), 'objIds must be integer or a list/numpy array of integers'
objIds = objIds if _isArrayLike(objIds) else [objIds]
sceneIds = []
for i in self.sceneIds:
f = open(os.path.join(self.root, 'scenes', 'scene_' + str(i).zfill(4), 'object_id_list.txt'))
idxs = [int(line.strip()) for line in f.readlines()]
check = all(item in idxs for item in objIds)
if check:
sceneIds.append(i)
return sceneIds
def getObjIds(self, sceneIds=None):
'''
**Input:**
- sceneIds: int or list of int of the scene ids.
**Output:**
- a list of int of the object ids in the given scenes.
'''
# get object ids in the given scenes
if sceneIds is None:
return self.objIds
assert _isArrayLike(sceneIds) or isinstance(sceneIds, int), 'sceneIds must be an integer or a list/numpy array of integers'
sceneIds = sceneIds if _isArrayLike(sceneIds) else [sceneIds]
objIds = []
for i in sceneIds:
f = open(os.path.join(self.root, 'scenes', 'scene_' + str(i).zfill(4), 'object_id_list.txt'))
idxs = [int(line.strip()) for line in f.readlines()]
objIds = list(set(objIds+idxs))
return objIds
def getDataIds(self, sceneIds=None):
'''
**Input:**
- sceneIds:int or list of int of the scenes ids.
**Output:**
- a list of int of the data ids. Data could be accessed by calling self.loadData(ids).
'''
# get index for datapath that contains the given scenes
if sceneIds is None:
return list(range(len(self.sceneName)))
ids = []
indexPosList = []
for i in sceneIds:
indexPosList += [ j for j in range(0,len(self.sceneName),256) if self.sceneName[j] == 'scene_'+str(i).zfill(4) ]
for idx in indexPosList:
ids += list(range(idx, idx+256))
return ids
def loadGraspLabels(self, objIds=None):
'''
**Input:**
- objIds: int or list of int of the object ids.
**Output:**
- a dict of grasplabels of each object.
'''
# load object-level grasp labels of the given obj ids
objIds = self.objIds if objIds is None else objIds
assert _isArrayLike(objIds) or isinstance(objIds, int), 'objIds must be an integer or a list/numpy array of integers'
objIds = objIds if _isArrayLike(objIds) else [objIds]
graspLabels = {}
for i in tqdm(objIds, desc='Loading grasping labels...'):
file = np.load(os.path.join(self.root, 'grasp_label', '{}_labels.npz'.format(str(i).zfill(3))))
graspLabels[i] = (file['points'].astype(np.float32), file['offsets'].astype(np.float32), file['scores'].astype(np.float32))
return graspLabels
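# Usage sketch: loading object-level grasp labels for a couple of objects. Each entry maps
# an object id to (points, offsets, scores); offsets stack the in-plane angle, depth and
# width along the last axis, as used by loadGrasp below. The path and ids are placeholders.
#
#     from graspnetAPI import GraspNet
#     g = GraspNet('/path/to/graspnet', camera='kinect', split='train')
#     labels = g.loadGraspLabels(objIds=[0, 2])
#     points, offsets, scores = labels[0]
#     print(points.shape, offsets.shape, scores.shape)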
def loadObjModels(self, objIds=None):
'''
**Function:**
- load object 3D models of the given obj ids
**Input:**
- objIDs: int or list of int of the object ids
**Output:**
- a list of open3d.geometry.PointCloud of the models
'''
objIds = self.objIds if objIds is None else objIds
assert _isArrayLike(objIds) or isinstance(objIds, int), 'objIds must be an integer or a list/numpy array of integers'
objIds = objIds if _isArrayLike(objIds) else [objIds]
models = []
for i in tqdm(objIds, desc='Loading objects...'):
plyfile = os.path.join(self.root, 'models','%03d' % i, 'nontextured.ply')
models.append(o3d.io.read_point_cloud(plyfile))
return models
def loadObjTrimesh(self, objIds=None):
'''
**Function:**
- load object 3D trimesh of the given obj ids
**Input:**
- objIDs: int or list of int of the object ids
**Output:**
- a list of trimesh.Trimesh of the models
'''
objIds = self.objIds if objIds is None else objIds
assert _isArrayLike(objIds) or isinstance(objIds, int), 'objIds must be an integer or a list/numpy array of integers'
objIds = objIds if _isArrayLike(objIds) else [objIds]
models = []
for i in tqdm(objIds, desc='Loading objects...'):
plyfile = os.path.join(self.root, 'models','%03d' % i, 'nontextured.ply')
models.append(trimesh.load(plyfile))
return models
def loadCollisionLabels(self, sceneIds=None):
'''
**Input:**
- sceneIds: int or list of int of the scene ids.
**Output:**
- dict of the collision labels.
'''
sceneIds = self.sceneIds if sceneIds is None else sceneIds
assert _isArrayLike(sceneIds) or isinstance(sceneIds, int), 'sceneIds must be an integer or a list/numpy array of integers'
sceneIds = sceneIds if _isArrayLike(sceneIds) else [sceneIds]
collisionLabels = {}
for sid in tqdm(sceneIds, desc='Loading collision labels...'):
labels = np.load(os.path.join(self.root, 'collision_label','scene_'+str(sid).zfill(4), 'collision_labels.npz'))
collisionLabel = []
for j in range(len(labels)):
collisionLabel.append(labels['arr_{}'.format(j)])
collisionLabels['scene_'+str(sid).zfill(4)] = collisionLabel
return collisionLabels
def loadRGB(self, sceneId, camera, annId):
'''
**Input:**
- sceneId: int of the scene index.
- camera: string of type of camera, 'realsense' or 'kinect'
- annId: int of the annotation index.
**Output:**
- numpy array of the rgb in RGB order.
'''
return cv2.cvtColor(cv2.imread(os.path.join(self.root, 'scenes', 'scene_%04d' % sceneId, camera, 'rgb', '%04d.png' % annId)), cv2.COLOR_BGR2RGB)
def loadBGR(self, sceneId, camera, annId):
'''
**Input:**
- sceneId: int of the scene index.
- camera: string of type of camera, 'realsense' or 'kinect'
- annId: int of the annotation index.
**Output:**
- numpy array of the rgb in BGR order.
'''
return cv2.imread(os.path.join(self.root, 'scenes', 'scene_%04d' % sceneId, camera, 'rgb', '%04d.png' % annId))
def loadDepth(self, sceneId, camera, annId):
'''
**Input:**
- sceneId: int of the scene index.
- camera: string of type of camera, 'realsense' or 'kinect'
- annId: int of the annotation index.
**Output:**
- numpy array of the depth with dtype = np.uint16
'''
return cv2.imread(os.path.join(self.root, 'scenes', 'scene_%04d' % sceneId, camera, 'depth', '%04d.png' % annId), cv2.IMREAD_UNCHANGED)
def loadMask(self, sceneId, camera, annId):
'''
**Input:**
- sceneId: int of the scene index.
- camera: string of type of camera, 'realsense' or 'kinect'
- annId: int of the annotation index.
**Output:**
- numpy array of the mask with dtype = np.uint16
'''
return cv2.imread(os.path.join(self.root, 'scenes', 'scene_%04d' % sceneId, camera, 'label', '%04d.png' % annId), cv2.IMREAD_UNCHANGED)
def loadWorkSpace(self, sceneId, camera, annId):
'''
**Input:**
- sceneId: int of the scene index.
- camera: string of type of camera, 'realsense' or 'kinect'
- annId: int of the annotation index.
**Output:**
- tuple of the bounding box coordinates (x1, y1, x2, y2).
'''
mask = self.loadMask(sceneId, camera, annId)
maskx = np.any(mask, axis=0)
masky = np.any(mask, axis=1)
x1 = np.argmax(maskx)
y1 = np.argmax(masky)
x2 = len(maskx) - np.argmax(maskx[::-1])
y2 = len(masky) - np.argmax(masky[::-1])
return (x1, y1, x2, y2)
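# Minimal sketch of the bounding-box logic above on a toy mask: np.any collapses the mask
# along each axis and argmax locates the first and last occupied column/row.
#
#     import numpy as np
#     mask = np.zeros((6, 8), dtype=bool)
#     mask[2:4, 3:6] = True
#     maskx, masky = np.any(mask, axis=0), np.any(mask, axis=1)
#     x1, y1 = np.argmax(maskx), np.argmax(masky)
#     x2, y2 = len(maskx) - np.argmax(maskx[::-1]), len(masky) - np.argmax(masky[::-1])
#     print((x1, y1, x2, y2))  # (3, 2, 6, 4)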
def loadScenePointCloud(self, sceneId, camera, annId, align=False, format = 'open3d', use_workspace = False, use_mask = True, use_inpainting = False):
'''
**Input:**
- sceneId: int of the scene index.
- camera: string of type of camera, 'realsense' or 'kinect'
- annId: int of the annotation index.
- align: bool of whether to align the point cloud to the table frame.
- format: string of the returned type, 'open3d' or 'numpy'.
- use_workspace: bool of whether to crop the point cloud to the workspace.
- use_mask: bool of whether to crop the point cloud using the mask (z > 0); only open3d 0.9.0 supports the False option.
Only set it to False if you know what you are doing.
- use_inpainting: bool of whether to inpaint the depth image to fill in missing values.
**Output:**
- open3d.geometry.PointCloud instance of the scene point cloud.
- or tuple of numpy array of point locations and colors.
'''
colors = self.loadRGB(sceneId = sceneId, camera = camera, annId = annId).astype(np.float32) / 255.0
depths = self.loadDepth(sceneId = sceneId, camera = camera, annId = annId)
if use_inpainting:
fault_mask = depths < 200
depths[fault_mask] = 0
inpainting_mask = (np.abs(depths) < 10).astype(np.uint8)
depths = cv2.inpaint(depths, inpainting_mask, 5, cv2.INPAINT_NS)
intrinsics = np.load(os.path.join(self.root, 'scenes', 'scene_%04d' % sceneId, camera, 'camK.npy'))
fx, fy = intrinsics[0,0], intrinsics[1,1]
cx, cy = intrinsics[0,2], intrinsics[1,2]
s = 1000.0
if align:
camera_poses = np.load(os.path.join(self.root, 'scenes', 'scene_%04d' % sceneId, camera, 'camera_poses.npy'))
camera_pose = camera_poses[annId]
align_mat = np.load(os.path.join(self.root, 'scenes', 'scene_%04d' % sceneId, camera, 'cam0_wrt_table.npy'))
camera_pose = align_mat.dot(camera_pose)
xmap, ymap = np.arange(colors.shape[1]), np.arange(colors.shape[0])
xmap, ymap = np.meshgrid(xmap, ymap)
points_z = depths / s
points_x = (xmap - cx) / fx * points_z
points_y = (ymap - cy) / fy * points_z
# print(f'points_x.shape:{points_x.shape}')
# print(f'points_y.shape:{points_y.shape}')
# print(f'points_z.shape:{points_z.shape}')
if use_workspace:
(x1, y1, x2, y2) = self.loadWorkSpace(sceneId, camera, annId)
points_z = points_z[y1:y2,x1:x2]
points_x = points_x[y1:y2,x1:x2]
points_y = points_y[y1:y2,x1:x2]
colors = colors[y1:y2,x1:x2]
mask = (points_z > 0)
points = np.stack([points_x, points_y, points_z], axis=-1)
# print(f'points.shape:{points.shape}')
if use_mask:
points = points[mask]
colors = colors[mask]
else:
points = points.reshape((-1, 3))
colors = colors.reshape((-1, 3))
if align:
points = transform_points(points, camera_pose)
if format == 'open3d':
cloud = o3d.geometry.PointCloud()
cloud.points = o3d.utility.Vector3dVector(points)
cloud.colors = o3d.utility.Vector3dVector(colors)
return cloud
elif format == 'numpy':
return points, colors
else:
raise ValueError('Format must be either "open3d" or "numpy".')
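# Usage sketch: the back-projection above follows the pinhole model with depth in
# millimetres, i.e. z = depth / 1000, x = (u - cx) / fx * z, y = (v - cy) / fy * z.
# The root path and ids are placeholders.
#
#     from graspnetAPI import GraspNet
#     g = GraspNet('/path/to/graspnet', camera='kinect', split='train')
#     cloud = g.loadScenePointCloud(sceneId=0, camera='kinect', annId=0,
#                                   align=True, format='open3d', use_workspace=True)
#     points, colors = g.loadScenePointCloud(sceneId=0, camera='kinect', annId=0, format='numpy')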
def loadSceneModel(self, sceneId, camera = 'kinect', annId = 0, align = False):
'''
**Input:**
- sceneId: int of the scene index.
- camera: string of type of camera, 'realsense' or 'kinect'
- annId: int of the annotation index.
- align: bool of whether align to the table frame.
**Output:**
- open3d.geometry.PointCloud list of the scene models.
'''
if align:
camera_poses = np.load(os.path.join(self.root, 'scenes', 'scene_%04d' % sceneId, camera, 'camera_poses.npy'))
camera_pose = camera_poses[annId]
align_mat = np.load(os.path.join(self.root, 'scenes', 'scene_%04d' % sceneId, camera, 'cam0_wrt_table.npy'))
camera_pose = np.matmul(align_mat,camera_pose)
scene_reader = xmlReader(os.path.join(self.root, 'scenes', 'scene_%04d' % sceneId, camera, 'annotations', '%04d.xml'% annId))
posevectors = scene_reader.getposevectorlist()
obj_list = []
mat_list = []
model_list = []
pose_list = []
for posevector in posevectors:
obj_idx, pose = parse_posevector(posevector)
obj_list.append(obj_idx)
mat_list.append(pose)
for obj_idx, pose in zip(obj_list, mat_list):
plyfile = os.path.join(self.root, 'models', '%03d'%obj_idx, 'nontextured.ply')
model = o3d.io.read_point_cloud(plyfile)
points = np.array(model.points)
if align:
pose = np.dot(camera_pose, pose)
points = transform_points(points, pose)
model.points = o3d.utility.Vector3dVector(points)
model_list.append(model)
pose_list.append(pose)
return model_list
def loadGrasp(self, sceneId, annId=0, format = '6d', camera='kinect', grasp_labels = None, collision_labels = None, fric_coef_thresh=0.4):
'''
**Input:**
- sceneId: int of scene id.
- annId: int of annotation id.
- format: string of grasp format, '6d' or 'rect'.
- camera: string of camera type, 'kinect' or 'realsense'.
- grasp_labels: dict of grasp labels. Call self.loadGraspLabels if not given.
- collision_labels: dict of collision labels. Call self.loadCollisionLabels if not given.
- fric_coef_thresh: float of the friction coefficient threshold of the grasp.
**ATTENTION**
the LOWER the friction coefficient is, the better the grasp is.
**Output:**
- If format == '6d', return a GraspGroup instance.
- If format == 'rect', return a RectGraspGroup instance.
'''
import numpy as np
assert format == '6d' or format == 'rect', 'format must be "6d" or "rect"'
if format == '6d':
from .utils.xmlhandler import xmlReader
from .utils.utils import get_obj_pose_list, generate_views, get_model_grasps, transform_points
from .utils.rotation import batch_viewpoint_params_to_matrix
camera_poses = np.load(os.path.join(self.root,'scenes','scene_%04d' %(sceneId,),camera, 'camera_poses.npy'))
camera_pose = camera_poses[annId]
scene_reader = xmlReader(os.path.join(self.root,'scenes','scene_%04d' %(sceneId,),camera,'annotations','%04d.xml' %(annId,)))
pose_vectors = scene_reader.getposevectorlist()
obj_list,pose_list = get_obj_pose_list(camera_pose,pose_vectors)
if grasp_labels is None:
print('warning: grasp_labels are not given, calling self.loadGraspLabels to retrieve them')
grasp_labels = self.loadGraspLabels(objIds = obj_list)
if collision_labels is None:
print('warning: collision_labels are not given, calling self.loadCollisionLabels to retrieve them')
collision_labels = self.loadCollisionLabels(sceneId)
num_views, num_angles, num_depths = 300, 12, 4
template_views = generate_views(num_views)
template_views = template_views[np.newaxis, :, np.newaxis, np.newaxis, :]
template_views = np.tile(template_views, [1, 1, num_angles, num_depths, 1])
collision_dump = collision_labels['scene_'+str(sceneId).zfill(4)]
# grasp = dict()
grasp_group = GraspGroup()
for i, (obj_idx, trans) in enumerate(zip(obj_list, pose_list)):
sampled_points, offsets, fric_coefs = grasp_labels[obj_idx]
collision = collision_dump[i]
point_inds = np.arange(sampled_points.shape[0])
num_points = len(point_inds)
target_points = sampled_points[:, np.newaxis, np.newaxis, np.newaxis, :]
target_points = np.tile(target_points, [1, num_views, num_angles, num_depths, 1])
views = np.tile(template_views, [num_points, 1, 1, 1, 1])
angles = offsets[:, :, :, :, 0]
depths = offsets[:, :, :, :, 1]
widths = offsets[:, :, :, :, 2]
mask1 = ((fric_coefs <= fric_coef_thresh) & (fric_coefs > 0) & ~collision)
target_points = target_points[mask1]
target_points = transform_points(target_points, trans)
target_points = transform_points(target_points, np.linalg.inv(camera_pose))
views = views[mask1]
angles = angles[mask1]
depths = depths[mask1]
widths = widths[mask1]
fric_coefs = fric_coefs[mask1]
Rs = batch_viewpoint_params_to_matrix(-views, angles)
Rs = np.matmul(trans[np.newaxis, :3, :3], Rs)
Rs = np.matmul(np.linalg.inv(camera_pose)[np.newaxis,:3,:3], Rs)
num_grasp = widths.shape[0]
scores = (1.1 - fric_coefs).reshape(-1,1)
widths = widths.reshape(-1,1)
heights = GRASP_HEIGHT * np.ones((num_grasp,1))
depths = depths.reshape(-1,1)
rotations = Rs.reshape((-1,9))
object_ids = obj_idx * np.ones((num_grasp,1), dtype=np.int32)
obj_grasp_array = np.hstack([scores, widths, heights, depths, rotations, target_points, object_ids]).astype(np.float32)
grasp_group.grasp_group_array = np.concatenate((grasp_group.grasp_group_array, obj_grasp_array))
return grasp_group
else:
# 'rect'
rect_grasps = RectGraspGroup(os.path.join(self.root,'scenes','scene_%04d' % sceneId,camera,'rect','%04d.npy' % annId))
return rect_grasps
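# Usage sketch: pre-loading grasp and collision labels once and reusing them across the
# annotations of a scene, which avoids the reload warnings printed above. The path and ids
# are placeholders.
#
#     from graspnetAPI import GraspNet
#     g = GraspNet('/path/to/graspnet', camera='kinect', split='train')
#     grasp_labels = g.loadGraspLabels(objIds=g.getObjIds(sceneIds=[0]))
#     collision_labels = g.loadCollisionLabels(sceneIds=0)
#     for annId in range(256):
#         gg = g.loadGrasp(sceneId=0, annId=annId, format='6d', camera='kinect',
#                          grasp_labels=grasp_labels, collision_labels=collision_labels,
#                          fric_coef_thresh=0.2)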
def loadData(self, ids=None, *extargs):
'''
**Input:**
- ids: int or list of int of the data ids.
- extargs: extra arguments. This function can also be called with loadData(sceneId, camera, annId)
**Output:**
- if ids is int, returns a tuple of data path
- if ids is not specified or is a list, returns a tuple of data path lists
'''
if ids is None:
return (self.rgbPath, self.depthPath, self.segLabelPath, self.metaPath, self.rectLabelPath, self.sceneName, self.annId)
if len(extargs) == 0:
if isinstance(ids, int):
return (self.rgbPath[ids], self.depthPath[ids], self.segLabelPath[ids], self.metaPath[ids], self.rectLabelPath[ids], self.sceneName[ids], self.annId[ids])
else:
return ([self.rgbPath[id] for id in ids],
[self.depthPath[id] for id in ids],
[self.segLabelPath[id] for id in ids],
[self.metaPath[id] for id in ids],
[self.rectLabelPath[id] for id in ids],
[self.sceneName[id] for id in ids],
[self.annId[id] for id in ids])
if len(extargs) == 2:
sceneId = ids
camera, annId = extargs
rgbPath = os.path.join(self.root, 'scenes', 'scene_'+str(sceneId).zfill(4), camera, 'rgb', str(annId).zfill(4)+'.png')
depthPath = os.path.join(self.root, 'scenes', 'scene_'+str(sceneId).zfill(4), camera, 'depth', str(annId).zfill(4)+'.png')
segLabelPath = os.path.join(self.root, 'scenes', 'scene_'+str(sceneId).zfill(4), camera, 'label', str(annId).zfill(4)+'.png')
metaPath = os.path.join(self.root, 'scenes', 'scene_'+str(sceneId).zfill(4), camera, 'meta', str(annId).zfill(4)+'.mat')
rectLabelPath = os.path.join(self.root, 'scenes', 'scene_'+str(sceneId).zfill(4), camera, 'rect', str(annId).zfill(4)+'.npy')
scene_name = 'scene_'+str(sceneId).zfill(4)
return (rgbPath, depthPath, segLabelPath, metaPath, rectLabelPath, scene_name,annId)
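# Usage sketch of the two call forms documented above; both return file paths only and
# read nothing from disk. The instance g is assumed to be constructed as in the earlier
# sketches.
#
#     rgb, depth, seg, meta, rect, sceneName, annId = g.loadData(0)   # by flat data id
#     paths = g.loadData(0, 'kinect', 0)                              # by scene, camera, annotation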
def showObjGrasp(self, objIds=[], numGrasp=10, th=0.5, maxWidth=0.08, saveFolder='save_fig', show=False):
'''
**Input:**
- objIds: int of list of objects ids.
- numGrasp: how many grasps to show in the image.
- th: threshold of the coefficient of friction.
- maxWidth: float, only visualize grasps with width<=maxWidth
- saveFolder: string of the path to save the rendered image.
- show: bool of whether to show the image.
**Output:**
- No output, but the rendered image is saved and optionally shown.
'''
from .utils.vis import visObjGrasp
objIds = objIds if _isArrayLike(objIds) else [objIds]
if len(objIds) == 0:
print('You need to specify object ids.')
return 0
if not os.path.exists(saveFolder):
os.mkdir(saveFolder)
for obj_id in objIds:
visObjGrasp(self.root, obj_id, num_grasp=numGrasp, th=th, max_width=maxWidth, save_folder=saveFolder, show=show)
def showSceneGrasp(self, sceneId, camera = 'kinect', annId = 0, format = '6d', numGrasp = 20, show_object = True, coef_fric_thresh = 0.1):
'''
**Input:**
- sceneId: int of the scene index.
- camera: string of the camera type, 'realsense' or 'kinect'.
- annId: int of the annotation index.
- format: string of the annotation type, 'rect' or '6d'.
- numGrasp: int of the number of displayed grasps; grasps will be randomly sampled.
- coef_fric_thresh: float of the friction coefficient threshold of the grasps.
'''
if format == '6d':
geometries = []
sceneGrasp = self.loadGrasp(sceneId = sceneId, annId = annId, camera = camera, format = '6d', fric_coef_thresh = coef_fric_thresh)
sceneGrasp = sceneGrasp.random_sample(numGrasp = numGrasp)
scenePCD = self.loadScenePointCloud(sceneId = sceneId, camera = camera, annId = annId, align = False)
geometries.append(scenePCD)
geometries += sceneGrasp.to_open3d_geometry_list()
if show_object:
objectPCD = self.loadSceneModel(sceneId = sceneId, camera = camera, annId = annId, align = False)
geometries += objectPCD
o3d.visualization.draw_geometries(geometries)
elif format == 'rect':
bgr = self.loadBGR(sceneId = sceneId, camera = camera, annId = annId)
sceneGrasp = self.loadGrasp(sceneId = sceneId, camera = camera, annId = annId, format = 'rect', fric_coef_thresh = coef_fric_thresh)
sceneGrasp = sceneGrasp.random_sample(numGrasp = numGrasp)
img = sceneGrasp.to_opencv_image(bgr, numGrasp = numGrasp)
cv2.imshow('Rectangle Grasps',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def show6DPose(self, sceneIds, saveFolder='save_fig', show=False, perObj=False):
'''
**Input:**
- sceneIds: int or list of scene ids.
- saveFolder: string of the folder to store the image.
- show: bool of whether to show the image.
- perObj: bool of whether to render the pose of each object individually.
**Output:**
- No output, but the rendered image is saved and optionally shown.
'''
from .utils.vis import vis6D
sceneIds = sceneIds if _isArrayLike(sceneIds) else [sceneIds]
if len(sceneIds) == 0:
print('You need to specify scene ids.')
return 0
if not os.path.exists(saveFolder):
os.mkdir(saveFolder)
for scene_id in sceneIds:
scene_name = 'scene_'+str(scene_id).zfill(4)
vis6D(self.root, scene_name, 0, self.camera,
align_to_table=True, save_folder=saveFolder, show=show, per_obj=perObj)

View File

@@ -0,0 +1,306 @@
__author__ = 'mhgou, cxwang and hsfang'
import numpy as np
import os
import time
import pickle
import open3d as o3d
from .graspnet import GraspNet
from .grasp import GraspGroup
from .utils.config import get_config
from .utils.eval_utils import get_scene_name, create_table_points, parse_posevector, load_dexnet_model, transform_points, compute_point_distance, compute_closest_points, voxel_sample_points, topk_grasps, get_grasp_score, collision_detection, eval_grasp
from .utils.xmlhandler import xmlReader
from .utils.utils import generate_scene_model
class GraspNetEval(GraspNet):
'''
Class for evaluation on GraspNet dataset.
**Input:**
- root: string of root path for the dataset.
- camera: string of type of the camera.
- split: string of the data split.
'''
def __init__(self, root, camera, split = 'test'):
super(GraspNetEval, self).__init__(root, camera, split)
def get_scene_models(self, scene_id, ann_id):
'''
return models in the model (object) coordinate frame
'''
model_dir = os.path.join(self.root, 'models')
# print('Scene {}, {}'.format(scene_id, camera))
scene_reader = xmlReader(os.path.join(self.root, 'scenes', get_scene_name(scene_id), self.camera, 'annotations', '%04d.xml' % (ann_id,)))
posevectors = scene_reader.getposevectorlist()
obj_list = []
model_list = []
dexmodel_list = []
for posevector in posevectors:
obj_idx, _ = parse_posevector(posevector)
obj_list.append(obj_idx)
for obj_idx in obj_list:
model = o3d.io.read_point_cloud(os.path.join(model_dir, '%03d' % obj_idx, 'nontextured.ply'))
dex_cache_path = os.path.join(self.root, 'dex_models', '%03d.pkl' % obj_idx)
if os.path.exists(dex_cache_path):
with open(dex_cache_path, 'rb') as f:
dexmodel = pickle.load(f)
else:
dexmodel = load_dexnet_model(os.path.join(model_dir, '%03d' % obj_idx, 'textured'))
points = np.array(model.points)
model_list.append(points)
dexmodel_list.append(dexmodel)
return model_list, dexmodel_list, obj_list
def get_model_poses(self, scene_id, ann_id):
'''
**Input:**
- scene_id: int of the scene index.
- ann_id: int of the annotation index.
**Output:**
- obj_list: list of int of object index.
- pose_list: list of 4x4 matrices of object poses.
- camera_pose: 4x4 matrix of the camera pose relative to the first frame.
- align_mat: 4x4 matrix of the first camera frame relative to the table.
'''
scene_dir = os.path.join(self.root, 'scenes')
camera_poses_path = os.path.join(self.root, 'scenes', get_scene_name(scene_id), self.camera, 'camera_poses.npy')
camera_poses = np.load(camera_poses_path)
camera_pose = camera_poses[ann_id]
align_mat_path = os.path.join(self.root, 'scenes', get_scene_name(scene_id), self.camera, 'cam0_wrt_table.npy')
align_mat = np.load(align_mat_path)
# print('Scene {}, {}'.format(scene_id, camera))
scene_reader = xmlReader(os.path.join(scene_dir, get_scene_name(scene_id), self.camera, 'annotations', '%04d.xml'% (ann_id,)))
posevectors = scene_reader.getposevectorlist()
obj_list = []
pose_list = []
for posevector in posevectors:
obj_idx, mat = parse_posevector(posevector)
obj_list.append(obj_idx)
pose_list.append(mat)
return obj_list, pose_list, camera_pose, align_mat
def eval_scene(self, scene_id, dump_folder, TOP_K = 50, return_list = False,vis = False, max_width = 0.1):
'''
**Input:**
- scene_id: int of the scene index.
- dump_folder: string of the folder that saves the dumped npy files.
- TOP_K: int of the number of top-ranked grasps to evaluate.
- return_list: bool of whether to return the result list.
- vis: bool of whether to show the result
- max_width: float of the maximum gripper width in evaluation
**Output:**
- scene_accuracy: np.array of shape (256, 50, 6) of the accuracy tensor.
'''
config = get_config()
table = create_table_points(1.0, 1.0, 0.05, dx=-0.5, dy=-0.5, dz=-0.05, grid_size=0.008)
list_coe_of_friction = [0.2,0.4,0.6,0.8,1.0,1.2]
model_list, dexmodel_list, _ = self.get_scene_models(scene_id, ann_id=0)
model_sampled_list = list()
for model in model_list:
model_sampled = voxel_sample_points(model, 0.008)
model_sampled_list.append(model_sampled)
scene_accuracy = []
grasp_list_list = []
score_list_list = []
collision_list_list = []
for ann_id in range(256):
grasp_group = GraspGroup().from_npy(os.path.join(dump_folder,get_scene_name(scene_id), self.camera, '%04d.npy' % (ann_id,)))
_, pose_list, camera_pose, align_mat = self.get_model_poses(scene_id, ann_id)
table_trans = transform_points(table, np.linalg.inv(np.matmul(align_mat, camera_pose)))
# clip width to [0,max_width]
gg_array = grasp_group.grasp_group_array
min_width_mask = (gg_array[:,1] < 0)
max_width_mask = (gg_array[:,1] > max_width)
gg_array[min_width_mask,1] = 0
gg_array[max_width_mask,1] = max_width
grasp_group.grasp_group_array = gg_array
grasp_list, score_list, collision_mask_list = eval_grasp(grasp_group, model_sampled_list, dexmodel_list, pose_list, config, table=table_trans, voxel_size=0.008, TOP_K = TOP_K)
# remove empty
grasp_list = [x for x in grasp_list if len(x) != 0]
score_list = [x for x in score_list if len(x) != 0]
collision_mask_list = [x for x in collision_mask_list if len(x)!=0]
if len(grasp_list) == 0:
grasp_accuracy = np.zeros((TOP_K,len(list_coe_of_friction)))
scene_accuracy.append(grasp_accuracy)
grasp_list_list.append([])
score_list_list.append([])
collision_list_list.append([])
print('\rMean Accuracy for scene:{} ann:{}='.format(scene_id, ann_id),np.mean(grasp_accuracy[:,:]), end='')
continue
# concat into scene level
grasp_list, score_list, collision_mask_list = np.concatenate(grasp_list), np.concatenate(score_list), np.concatenate(collision_mask_list)
if vis:
t = o3d.geometry.PointCloud()
t.points = o3d.utility.Vector3dVector(table_trans)
model_list = generate_scene_model(self.root, 'scene_%04d' % scene_id , ann_id, return_poses=False, align=False, camera=self.camera)
import copy
gg = GraspGroup(copy.deepcopy(grasp_list))
scores = np.array(score_list)
scores = scores / 2 + 0.5 # -1 -> 0, 0 -> 0.5, 1 -> 1
scores[collision_mask_list] = 0.3
gg.scores = scores
gg.widths = 0.1 * np.ones((len(gg)), dtype = np.float32)
grasps_geometry = gg.to_open3d_geometry_list()
pcd = self.loadScenePointCloud(scene_id, self.camera, ann_id)
o3d.visualization.draw_geometries([pcd, *grasps_geometry])
o3d.visualization.draw_geometries([pcd, *grasps_geometry, *model_list])
o3d.visualization.draw_geometries([*grasps_geometry, *model_list, t])
# sort in scene level
grasp_confidence = grasp_list[:,0]
indices = np.argsort(-grasp_confidence)
grasp_list, score_list, collision_mask_list = grasp_list[indices], score_list[indices], collision_mask_list[indices]
grasp_list_list.append(grasp_list)
score_list_list.append(score_list)
collision_list_list.append(collision_mask_list)
#calculate AP
grasp_accuracy = np.zeros((TOP_K,len(list_coe_of_friction)))
for fric_idx, fric in enumerate(list_coe_of_friction):
for k in range(0,TOP_K):
if k+1 > len(score_list):
grasp_accuracy[k,fric_idx] = np.sum(((score_list<=fric) & (score_list>0)).astype(int))/(k+1)
else:
grasp_accuracy[k,fric_idx] = np.sum(((score_list[0:k+1]<=fric) & (score_list[0:k+1]>0)).astype(int))/(k+1)
print('\rMean Accuracy for scene:%04d ann:%04d = %.3f' % (scene_id, ann_id, 100.0 * np.mean(grasp_accuracy[:,:])), end='', flush=True)
scene_accuracy.append(grasp_accuracy)
if not return_list:
return scene_accuracy
else:
return scene_accuracy, grasp_list_list, score_list_list, collision_list_list
def parallel_eval_scenes(self, scene_ids, dump_folder, proc = 2):
'''
**Input:**
- scene_ids: list of int of scene index.
- dump_folder: string of the folder that saves the npy files.
- proc: int of the number of processes to use to evaluate.
**Output:**
- scene_acc_list: list of the scene accuracy.
'''
from multiprocessing import Pool
p = Pool(processes = proc)
res_list = []
for scene_id in scene_ids:
res_list.append(p.apply_async(self.eval_scene, (scene_id, dump_folder)))
p.close()
p.join()
scene_acc_list = []
for res in res_list:
scene_acc_list.append(res.get())
return scene_acc_list
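# Usage sketch: evaluating a few scenes in parallel. dump_folder must contain one
# GraspGroup npy file per annotation, laid out as scene_xxxx/<camera>/xxxx.npy as read in
# eval_scene above. Paths are placeholders and the package-level import is assumed, as in
# the repository examples.
#
#     from graspnetAPI import GraspNetEval
#     ge = GraspNetEval('/path/to/graspnet', camera='kinect', split='test')
#     accs = ge.parallel_eval_scenes(scene_ids=[100, 101], dump_folder='/path/to/dump', proc=2)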
def eval_seen(self, dump_folder, proc = 2):
'''
**Input:**
- dump_folder: string of the folder that saves the npy files.
- proc: int of the number of processes to use to evaluate.
**Output:**
- res: numpy array of the detailed accuracy.
- ap: float of the AP for seen split.
'''
res = np.array(self.parallel_eval_scenes(scene_ids = list(range(100, 130)), dump_folder = dump_folder, proc = proc))
ap = np.mean(res)
print('\nEvaluation Result:\n----------\n{}, AP Seen={}'.format(self.camera, ap))
return res, ap
def eval_similar(self, dump_folder, proc = 2):
'''
**Input:**
- dump_folder: string of the folder that saves the npy files.
- proc: int of the number of processes to use to evaluate.
**Output:**
- res: numpy array of the detailed accuracy.
- ap: float of the AP for similar split.
'''
res = np.array(self.parallel_eval_scenes(scene_ids = list(range(130, 160)), dump_folder = dump_folder, proc = proc))
ap = np.mean(res)
print('\nEvaluation Result:\n----------\n{}, AP={}, AP Similar={}'.format(self.camera, ap, ap))
return res, ap
def eval_novel(self, dump_folder, proc = 2):
'''
**Input:**
- dump_folder: string of the folder that saves the npy files.
- proc: int of the number of processes to use to evaluate.
**Output:**
- res: numpy array of the detailed accuracy.
- ap: float of the AP for novel split.
'''
res = np.array(self.parallel_eval_scenes(scene_ids = list(range(160, 190)), dump_folder = dump_folder, proc = proc))
ap = np.mean(res)
print('\nEvaluation Result:\n----------\n{}, AP={}, AP Novel={}'.format(self.camera, ap, ap))
return res, ap
def eval_all(self, dump_folder, proc = 2):
'''
**Input:**
- dump_folder: string of the folder that saves the npy files.
- proc: int of the number of processes to use to evaluate.
**Output:**
- res: numpy array of the detailed accuracy.
- ap: float of the AP for all split.
'''
res = np.array(self.parallel_eval_scenes(scene_ids = list(range(100, 190)), dump_folder = dump_folder, proc = proc))
ap = [np.mean(res), np.mean(res[0:30]), np.mean(res[30:60]), np.mean(res[60:90])]
print('\nEvaluation Result:\n----------\n{}, AP={}, AP Seen={}, AP Similar={}, AP Novel={}'.format(self.camera, ap[0], ap[1], ap[2], ap[3]))
return res, ap
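# Usage sketch: running the full benchmark. eval_all averages scenes 100-189 and also
# reports the seen / similar / novel splits computed above. Paths are placeholders and the
# package-level import is assumed, as in the repository examples.
#
#     from graspnetAPI import GraspNetEval
#     ge = GraspNetEval('/path/to/graspnet', camera='realsense', split='test')
#     res, ap = ge.eval_all(dump_folder='/path/to/dump', proc=8)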

View File

@@ -0,0 +1,18 @@
def get_config():
'''
- return the config dict
'''
config = dict()
force_closure = dict()
force_closure['quality_method'] = 'force_closure'
force_closure['num_cone_faces'] = 8
force_closure['soft_fingers'] = 1
force_closure['quality_type'] = 'quasi_static'
force_closure['all_contacts_required']= 1
force_closure['check_approach'] = False
force_closure['torque_scaling'] = 0.01
force_closure['wrench_norm_thresh'] = 0.001
force_closure['wrench_regularizer'] = 0.0000000001
config['metrics'] = dict()
config['metrics']['force_closure'] = force_closure
return config
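# Usage sketch: the returned dict nests the force-closure settings under
# config['metrics']['force_closure'], so individual fields can be inspected or overridden
# before the config is handed to the evaluation routines.
#
#     config = get_config()
#     fc = config['metrics']['force_closure']
#     print(fc['quality_method'], fc['num_cone_faces'])  # force_closure 8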

View File

@@ -0,0 +1,18 @@
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.

View File

@@ -0,0 +1,24 @@
# # -*- coding: utf-8 -*-
# """
# Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
# Permission to use, copy, modify, and distribute this software and its documentation for educational,
# research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
# hereby granted, provided that the above copyright notice, this paragraph and the following two
# paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
# Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
# 7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
# IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
# INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
# THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
# HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
# """
# # from .constants import *
# # from .abstractstatic import abstractstatic
# # from .api import DexNet

View File

@@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
# Abstact static methods
# Source: https://stackoverflow.com/questions/4474395/staticmethod-and-abc-abstractmethod-will-it-blend
class abstractstatic(staticmethod):
__slots__ = ()
def __init__(self, function):
super(abstractstatic, self).__init__(function)
function.__isabstractmethod__ = True
__isabstractmethod__ = True

View File

@@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
# Grasp contact params
NO_CONTACT_DIST = 0.2 # distance to points that are not in contact for window extraction
WIN_DIST_LIM = 0.02 # limits for window plotting
# File extensions
HDF5_EXT = '.hdf5'
OBJ_EXT = '.obj'
OFF_EXT = '.off'
STL_EXT = '.stl'
SDF_EXT = '.sdf'
URDF_EXT = '.urdf'
# Tags for intermediate files
DEC_TAG = '_dec'
PROC_TAG = '_proc'
# Solver default max iterations
DEF_MAX_ITER = 100
# Access levels for db
READ_ONLY_ACCESS = 'READ_ONLY'
READ_WRITE_ACCESS = 'READ_WRITE'
WRITE_ACCESS = 'WRITE'

View File

@@ -0,0 +1,20 @@
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""

View File

@@ -0,0 +1,703 @@
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
"""
Contact class that encapsulates friction cone and surface window computation.
Authors: Brian Hou and Jeff Mahler
"""
from abc import ABCMeta, abstractmethod
import itertools as it
import logging
import numpy as np
from skimage.restoration import denoise_bilateral
from autolab_core import RigidTransform
from ..constants import NO_CONTACT_DIST
from ..constants import WIN_DIST_LIM
import IPython
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import scipy.ndimage as spfilt  # Gaussian smoothing used in the visualization code below
# class Contact(metaclass=ABCMeta): # for python3
class Contact:
""" Abstract class for contact models. """
__metaclass__ = ABCMeta
class Contact3D(Contact):
""" 3D contact points.
Attributes
----------
graspable : :obj:`GraspableObject3D`
object to use to get contact information
contact_point : 3x1 :obj:`numpy.ndarray`
point of contact on the object
in_direction : 3x1 :obj:`numpy.ndarray`
direction along which contact was made
normal : normalized 3x1 :obj:`numpy.ndarray`
surface normal at the contact point
"""
def __init__(self, graspable, contact_point, in_direction=None):
self.graspable_ = graspable
self.point_ = contact_point # in world coordinates
# cached attributes
self.in_direction_ = in_direction # inward facing grasp axis
self.friction_cone_ = None
self.normal_ = None # outward facing normal
self.surface_info_ = None
self._compute_normal()
@property
def graspable(self):
return self.graspable_
@property
def point(self):
return self.point_
@property
def normal(self):
return self.normal_
@normal.setter
def normal(self, normal):
self.normal_ = normal
@property
def in_direction(self):
return self.in_direction_
def _compute_normal(self):
"""Compute outward facing normal at contact, according to in_direction.
Indexes into the SDF grid coordinates to lookup the normal info.
"""
# tf to grid
as_grid = self.graspable.sdf.transform_pt_obj_to_grid(self.point)
on_surface, _ = self.graspable.sdf.on_surface(as_grid)
if not on_surface:
logging.debug('Contact point not on surface')
return None
# compute outward facing normal from SDF
normal = self.graspable.sdf.surface_normal(as_grid)
# flip normal to point outward if in_direction is defined
if self.in_direction_ is not None and np.dot(self.in_direction_, normal) > 0:
normal = -normal
# transform to world frame
normal = self.graspable.sdf.transform_pt_grid_to_obj(normal, direction=True)
self.normal_ = normal
def tangents(self, direction=None, align_axes=True, max_samples=1000):
"""Returns the direction vector and tangent vectors at a contact point.
The direction vector defaults to the *inward-facing* normal vector at
this contact.
The direction and tangent vectors form a right-handed coordinate frame.
Parameters
----------
direction : 3x1 :obj:`numpy.ndarray`
direction to find orthogonal plane for
align_axes : bool
whether or not to align the tangent plane to the object reference frame
max_samples : int
number of samples to use in discrete optimization for alignment of reference frame
Returns
-------
direction : normalized 3x1 :obj:`numpy.ndarray`
direction to find orthogonal plane for
t1 : normalized 3x1 :obj:`numpy.ndarray`
first tangent vector, x axis
t2 : normalized 3x1 :obj:`numpy.ndarray`
second tangent vector, y axis
"""
# illegal contact, cannot return tangents
if self.normal_ is None:
return None, None, None
# default to inward pointing normal
if direction is None:
direction = -self.normal_
# force direction to face inward
if np.dot(self.normal_, direction) > 0:
direction = -direction
direction = direction.reshape((3, 1)) # make 2D for SVD
# get orthogonal plane
U, _, _ = np.linalg.svd(direction)
# U[:, 1:] spans the tangent plane at the contact
x, y = U[:, 1], U[:, 2]
# make sure t1 and t2 obey right hand rule
z_hat = np.cross(x, y)
if z_hat.dot(direction) < 0:
y = -y
v = x
w = y
# redefine tangent x axis to automatically align with the object x axis
if align_axes:
max_ip = 0
max_theta = 0
target = np.array([1, 0, 0])
theta = 0
d_theta = 2 * np.pi / float(max_samples)
for i in range(max_samples):
v = np.cos(theta) * x + np.sin(theta) * y
if v.dot(target) > max_ip:
max_ip = v.dot(target)
max_theta = theta
theta = theta + d_theta
v = np.cos(max_theta) * x + np.sin(max_theta) * y
w = np.cross(direction.ravel(), v)
return np.squeeze(direction), v, w
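# Minimal numpy sketch of the basis construction above: the SVD of a 3x1 direction vector
# yields two further columns spanning its orthogonal (tangent) plane, and the cross
# product check enforces a right-handed frame. The direction is an arbitrary placeholder.
#
#     import numpy as np
#     direction = np.array([[0.0], [0.0], [1.0]])
#     U, _, _ = np.linalg.svd(direction)
#     t1, t2 = U[:, 1], U[:, 2]
#     if np.cross(t1, t2).dot(direction.ravel()) < 0:
#         t2 = -t2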
def reference_frame(self, align_axes=True):
"""Returns the local reference frame of the contact.
Z axis in the in direction (or surface normal if not specified)
X and Y axes in the tangent plane to the direction
Parameters
----------
align_axes : bool
whether or not to align to the object axes
Returns
-------
:obj:`RigidTransform`
rigid transformation from contact frame to object frame
"""
t_obj_contact = self.point
rz, rx, ry = self.tangents(self.in_direction_, align_axes=align_axes)
R_obj_contact = np.array([rx, ry, rz]).T
T_contact_obj = RigidTransform(rotation=R_obj_contact,
translation=t_obj_contact,
from_frame='contact', to_frame='obj')
return T_contact_obj
def normal_force_magnitude(self):
""" Returns the component of the force that the contact would apply along the normal direction.
Returns
-------
float
magnitude of force along object surface normal
"""
normal_force_mag = 1.0
if self.in_direction_ is not None and self.normal_ is not None:
in_normal = -self.normal_
in_direction_norm = self.in_direction_ / np.linalg.norm(self.in_direction_)
normal_force_mag = np.dot(in_direction_norm, in_normal)
return max(normal_force_mag, 0.0)
def friction_cone(self, num_cone_faces=8, friction_coef=0.5):
""" Computes the friction cone and normal for a contact point.
Parameters
----------
num_cone_faces : int
number of cone faces to use in discretization
friction_coef : float
coefficient of friction at contact point
Returns
-------
success : bool
False when cone can't be computed
cone_support : :obj:`numpy.ndarray`
array where each column is a vector on the boundary of the cone
normal : normalized 3x1 :obj:`numpy.ndarray`
outward facing surface normal
"""
if self.friction_cone_ is not None and self.normal_ is not None:
return True, self.friction_cone_, self.normal_
# get normal and tangents
in_normal, t1, t2 = self.tangents()
if in_normal is None:
return False, self.friction_cone_, self.normal_
friction_cone_valid = True
# check whether the contact would slip, i.e. whether the tangential component of the
# applied force exceeds the maximum friction force
if self.in_direction_ is not None:
in_direction_norm = self.in_direction_ / np.linalg.norm(self.in_direction_)
normal_force_mag = self.normal_force_magnitude()
tan_force_x = np.dot(in_direction_norm, t1)
tan_force_y = np.dot(in_direction_norm, t2)
tan_force_mag = np.sqrt(tan_force_x ** 2 + tan_force_y ** 2)
friction_force_mag = friction_coef * normal_force_mag
if friction_force_mag < tan_force_mag:
logging.debug('Contact would slip')
return False, self.friction_cone_, self.normal_
# set up friction cone
tan_len = friction_coef
force = in_normal
cone_support = np.zeros((3, num_cone_faces))
# find convex combinations of tangent vectors
for j in range(num_cone_faces):
tan_vec = t1 * np.cos(2 * np.pi * (float(j) / num_cone_faces)) + t2 * np.sin(
2 * np.pi * (float(j) / num_cone_faces))
cone_support[:, j] = force + friction_coef * tan_vec
self.friction_cone_ = cone_support
return True, self.friction_cone_, self.normal_
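# Minimal numpy sketch of the cone discretization above: each boundary direction is the
# inward normal plus friction_coef times a unit tangent sampled at equally spaced angles.
# The normal and tangents are arbitrary placeholders.
#
#     import numpy as np
#     in_normal = np.array([0.0, 0.0, -1.0])
#     t1, t2 = np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])
#     friction_coef, num_cone_faces = 0.5, 8
#     cone = np.zeros((3, num_cone_faces))
#     for j in range(num_cone_faces):
#         ang = 2 * np.pi * j / num_cone_faces
#         cone[:, j] = in_normal + friction_coef * (np.cos(ang) * t1 + np.sin(ang) * t2)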
def torques(self, forces):
"""
Get the torques that can be applied by a set of force vectors at the contact point.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
the forces applied at the contact
Returns
-------
success : bool
whether or not computation was successful
torques : 3xN :obj:`numpy.ndarray`
the torques that can be applied by given forces at the contact
"""
as_grid = self.graspable.sdf.transform_pt_obj_to_grid(self.point)
on_surface, _ = self.graspable.sdf.on_surface(as_grid)
if not on_surface:
logging.debug('Contact point not on surface')
return False, None
num_forces = forces.shape[1]
torques = np.zeros([3, num_forces])
moment_arm = self.graspable.moment_arm(self.point)
for i in range(num_forces):
torques[:, i] = np.cross(moment_arm, forces[:, i])
return True, torques
def surface_window_sdf(self, width=1e-2, num_steps=21):
"""Returns a window of SDF values on the tangent plane at a contact point.
Used for patch computation.
Parameters
----------
width : float
width of the window in obj frame
num_steps : int
number of steps to use along the contact in direction
Returns
-------
window : NUM_STEPSxNUM_STEPS :obj:`numpy.ndarray`
array of distances from tangent plane to obj along in direction, False if surface window can't be computed
"""
in_normal, t1, t2 = self.tangents()
if in_normal is None: # normal and tangents not found
return False
scales = np.linspace(-width / 2.0, width / 2.0, num_steps)
window = np.zeros(num_steps ** 2)
for i, (c1, c2) in enumerate(it.product(scales, repeat=2)):
curr_loc = self.point + c1 * t1 + c2 * t2
curr_loc_grid = self.graspable.sdf.transform_pt_obj_to_grid(curr_loc)
if self.graspable.sdf.is_out_of_bounds(curr_loc_grid):
window[i] = -1e-2
continue
window[i] = self.graspable.sdf[curr_loc_grid]
return window.reshape((num_steps, num_steps))
def _compute_surface_window_projection(self, u1=None, u2=None, width=1e-2,
num_steps=21, max_projection=0.1, back_up=0, samples_per_grid=2.0,
sigma_range=0.1, sigma_spatial=1, direction=None, vis=False,
compute_weighted_covariance=False,
disc=False, num_radial_steps=5, debug_objs=None):
"""Compute the projection window onto the basis defined by u1 and u2.
Params:
u1, u2 - orthogonal numpy 3 arrays
width - float width of the window in obj frame
num_steps - int number of steps
max_projection - float maximum amount to search forward for a
contact (meters)
back_up - amount in meters to back up before projecting
samples_per_grid - float number of samples per grid when finding contacts
sigma - bandwidth of gaussian filter on window
direction - dir to do the projection along
compute_weighted_covariance - whether to return the weighted
covariance matrix, along with the window
Returns:
window - numpy NUM_STEPSxNUM_STEPS array of distances from tangent
plane to obj, False if surface window can't be computed
"""
direction, t1, t2 = self.tangents(direction)
if direction is None: # normal and tangents not found
raise ValueError('Direction could not be computed')
if u1 is not None and u2 is not None: # use given basis
t1, t2 = u1, u2
# number of samples used when looking for contacts
no_contact = NO_CONTACT_DIST
num_samples = int(samples_per_grid * (max_projection + back_up) / self.graspable.sdf.resolution)
window = np.zeros(num_steps ** 2)
res = width / num_steps
scales = np.linspace(-width / 2.0 + res / 2.0, width / 2.0 - res / 2.0, num_steps)
scales_it = it.product(scales, repeat=2)
if disc:
scales_it = []
for i in range(num_steps):
theta = 2.0 * np.pi * i / num_steps # evenly spaced polar angles over the disc
for j in range(num_radial_steps):
r = (j + 1) * width / num_radial_steps
p = (r * np.cos(theta), r * np.sin(theta))
scales_it.append(p)
# start computing weighted covariance matrix
if compute_weighted_covariance:
cov = np.zeros((3, 3))
cov_weight = 0
if vis:
ax = plt.gca(projection='3d')
self.graspable_.sdf.scatter()
for i, (c1, c2) in enumerate(scales_it):
curr_loc = self.point + c1 * t1 + c2 * t2
curr_loc_grid = self.graspable.sdf.transform_pt_obj_to_grid(curr_loc)
if self.graspable.sdf.is_out_of_bounds(curr_loc_grid):
window[i] = no_contact
continue
if vis:
ax.scatter(curr_loc_grid[0], curr_loc_grid[1], curr_loc_grid[2], s=130, c='y')
found, projection_contact = self.graspable._find_projection(
curr_loc, direction, max_projection, back_up, num_samples, vis=vis)
if found:
# logging.debug('%d found.' %(i))
sign = direction.dot(projection_contact.point - curr_loc)
projection = (sign / abs(sign)) * np.linalg.norm(projection_contact.point - curr_loc)
projection = min(projection, max_projection)
if compute_weighted_covariance:
# weight according to SHOT: R - d_i
weight = width / np.sqrt(2) - np.sqrt(c1 ** 2 + c2 ** 2)
diff = (projection_contact.point - self.point).reshape((3, 1))
cov += weight * np.dot(diff, diff.T)
cov_weight += weight
else:
logging.debug('%d not found.' % (i))
projection = no_contact
window[i] = projection
if vis:
plt.show()
if not disc:
window = window.reshape((num_steps, num_steps)).T # transpose to make x-axis along columns
if debug_objs is not None:
debug_objs.append(window)
# apply bilateral filter
if sigma_range > 0.0 and sigma_spatial > 0.0:
window_min_val = np.min(window)
window_pos = window - window_min_val
window_pos_blur = denoise_bilateral(window_pos, sigma_range=sigma_range, sigma_spatial=sigma_spatial,
mode='nearest')
window = window_pos_blur + window_min_val
if compute_weighted_covariance:
if cov_weight > 0:
return window, cov / cov_weight
return window, cov
return window
def surface_window_projection_unaligned(self, width=1e-2, num_steps=21,
max_projection=0.1, back_up=0.0, samples_per_grid=2.0,
sigma=1.5, direction=None, vis=False):
"""Projects the local surface onto the tangent plane at a contact point. Deprecated.
"""
return self._compute_surface_window_projection(width=width,
num_steps=num_steps, max_projection=max_projection,
back_up=back_up, samples_per_grid=samples_per_grid,
sigma=sigma, direction=direction, vis=vis)
def surface_window_projection(self, width=1e-2, num_steps=21,
max_projection=0.1, back_up=0.0, samples_per_grid=2.0,
sigma_range=0.1, sigma_spatial=1, direction=None, compute_pca=False, vis=False,
debug_objs=None):
"""Projects the local surface onto the tangent plane at a contact point.
Parameters
----------
width : float
width of the window in obj frame
num_steps : int
number of steps to use along the in direction
max_projection : float
maximum amount to search forward for a contact (meters)
back_up : float
amount to back up before finding a contact in meters
samples_per_grid : float
number of samples per grid when finding contacts
sigma_range : float
bandwidth of bilateral range filter on window
sigma_spatial : float
bandwidth of gaussian spatial filter of bilateral filter
direction : 3x1 :obj:`numpy.ndarray`
dir to do the projection along
Returns
-------
window : NUM_STEPSxNUM_STEPS :obj:`numpy.ndarray`
array of distances from tangent plane to obj, False if surface window can't be computed
"""
# get initial projection
direction, t1, t2 = self.tangents(direction)
window, cov = self._compute_surface_window_projection(t1, t2,
width=width, num_steps=num_steps,
max_projection=max_projection,
back_up=back_up, samples_per_grid=samples_per_grid,
sigma_range=sigma_range, sigma_spatial=sigma_spatial,
direction=direction,
vis=False, compute_weighted_covariance=True,
debug_objs=debug_objs)
if not compute_pca:
return window
# compute principal axis
pca = PCA()
pca.fit(cov)
R = pca.components_
principal_axis = R[0, :]
if np.isclose(abs(np.dot(principal_axis, direction)), 1):
# principal axis is aligned with direction of projection, use secondary axis
principal_axis = R[1, :]
if vis:
# reshape window
window = window.reshape((num_steps, num_steps))
# project principal axis onto tangent plane (t1, t2) to get u1
u1t = np.array([np.dot(principal_axis, t1), np.dot(principal_axis, t2)])
u2t = np.array([-u1t[1], u1t[0]])
if sigma_spatial > 0:
window = spfilt.gaussian_filter(window, sigma_spatial)
plt.figure()
plt.title('Principal Axis')
plt.imshow(window, extent=[0, num_steps - 1, num_steps - 1, 0],
interpolation='none', cmap=plt.cm.binary)
plt.colorbar()
plt.clim(-WIN_DIST_LIM, WIN_DIST_LIM) # fixing color range for visual comparisons
center = num_steps // 2
plt.scatter([center, center * u1t[0] + center], [center, -center * u1t[1] + center], color='blue')
plt.scatter([center, center * u2t[0] + center], [center, -center * u2t[1] + center], color='green')
u1 = np.dot(principal_axis, t1) * t1 + np.dot(principal_axis, t2) * t2
u2 = np.cross(direction, u1) # u2 must be orthogonal to u1 on plane
u1 = u1 / np.linalg.norm(u1)
u2 = u2 / np.linalg.norm(u2)
window = self._compute_surface_window_projection(u1, u2,
width=width, num_steps=num_steps,
max_projection=max_projection,
back_up=back_up, samples_per_grid=samples_per_grid,
sigma_range=sigma_range, sigma_spatial=sigma_spatial, direction=direction, vis=False)
# arbitrarily require that right_avg > left_avg (inspired by SHOT)
left_avg = np.average(window[:, :num_steps // 2])
right_avg = np.average(window[:, num_steps // 2:])
if left_avg > right_avg:
# need to flip both u1 and u2, i.e. rotate 180 degrees
window = np.rot90(window, k=2)
if vis:
if sigma_spatial > 0:
window = spfilt.gaussian_filter(window, sigma_spatial)
plt.figure()
plt.title('Tfd')
plt.imshow(window, extent=[0, num_steps - 1, num_steps - 1, 0],
interpolation='none', cmap=plt.cm.binary)
plt.colorbar()
plt.clim(-WIN_DIST_LIM, WIN_DIST_LIM) # fixing color range for visual comparisons
plt.show()
return window
def surface_information(self, width, num_steps, sigma_range=0.1, sigma_spatial=1,
back_up=0.0, max_projection=0.1, direction=None, debug_objs=None, samples_per_grid=2):
"""
Returns the local surface window, gradient, and curvature for a single contact.
Parameters
----------
width : float
width of surface window in object frame
num_steps : int
number of steps to use along the in direction
sigma_range : float
bandwidth of bilateral range filter on window
sigma_spatial : float
bandwidth of gaussian spatial filter of bilateral filter
back_up : float
amount to back up before finding a contact in meters
max_projection : float
maximum amount to search forward for a contact (meters)
direction : 3x1 :obj:`numpy.ndarray`
direction along width to render the window
debug_objs : :obj:`list`
list to put debugging info into
samples_per_grid : float
number of samples per grid when finding contacts
Returns
-------
surface_window : :obj:`SurfaceWindow`
window information for local surface patch of contact on the given object
"""
if self.surface_info_ is not None:
return self.surface_info_
if direction is None:
direction = self.in_direction_
proj_window = self.surface_window_projection(width, num_steps,
sigma_range=sigma_range, sigma_spatial=sigma_spatial,
back_up=back_up, max_projection=max_projection,
samples_per_grid=samples_per_grid,
direction=direction, vis=False, debug_objs=debug_objs)
if proj_window is None:
raise ValueError('Surface window could not be computed')
grad_win = np.gradient(proj_window)
hess_x = np.gradient(grad_win[0])
hess_y = np.gradient(grad_win[1])
gauss_curvature = np.zeros(proj_window.shape)
for i in range(num_steps):
for j in range(num_steps):
local_hess = np.array([[hess_x[0][i, j], hess_x[1][i, j]],
[hess_y[0][i, j], hess_y[1][i, j]]])
# symmetrize
local_hess = (local_hess + local_hess.T) / 2.0
# curvature
gauss_curvature[i, j] = np.linalg.det(local_hess)
return SurfaceWindow(proj_window, grad_win, hess_x, hess_y, gauss_curvature)
def plot_friction_cone(self, color='y', scale=1.0):
success, cone, in_normal = self.friction_cone()
ax = plt.gca(projection='3d')
self.graspable.sdf.scatter() # object
x, y, z = self.graspable.sdf.transform_pt_obj_to_grid(self.point)
nx, ny, nz = self.graspable.sdf.transform_pt_obj_to_grid(in_normal, direction=True)
ax.scatter([x], [y], [z], c=color, s=60) # contact
ax.scatter([x - nx], [y - ny], [z - nz], c=color, s=60) # normal
if success:
ax.scatter(x + scale * cone[0], y + scale * cone[1], z + scale * cone[2], c=color, s=40) # cone
ax.set_xlim3d(0, self.graspable.sdf.dims_[0])
ax.set_ylim3d(0, self.graspable.sdf.dims_[1])
ax.set_zlim3d(0, self.graspable.sdf.dims_[2])
return plt.Rectangle((0, 0), 1, 1, fc=color) # return a proxy for legend
class SurfaceWindow:
"""Struct for encapsulating local surface window features.
Attributes
----------
proj_win : NxN :obj:`numpy.ndarray`
the window of distances to a surface (depth image created by orthographic projection)
grad : NxN :obj:`numpy.ndarray`
X and Y gradients of the projection window
hess_x : NxN :obj:`numpy.ndarray`
hessian, partial derivatives of the X gradient window
hess_y : NxN :obj:`numpy.ndarray`
hessian, partial derivatives of the Y gradient window
gauss_curvature : NxN :obj:`numpy.ndarray`
gauss curvature at each point (function of hessian determinant)
"""
def __init__(self, proj_win, grad, hess_x, hess_y, gauss_curvature):
self.proj_win_ = proj_win
self.grad_ = grad
self.hess_x_ = hess_x
self.hess_y_ = hess_y
self.gauss_curvature_ = gauss_curvature
@property
def proj_win_2d(self):
return self.proj_win_
@property
def proj_win(self):
return self.proj_win_.flatten()
@property
def grad_x(self):
return self.grad_[0].flatten()
@property
def grad_y(self):
return self.grad_[1].flatten()
@property
def grad_x_2d(self):
return self.grad_[0]
@property
def grad_y_2d(self):
return self.grad_[1]
@property
def curvature(self):
return self.gauss_curvature_.flatten()
def asarray(self, proj_win_weight=0.0, grad_x_weight=0.0,
grad_y_weight=0.0, curvature_weight=0.0):
proj_win = proj_win_weight * self.proj_win
grad_x = grad_x_weight * self.grad_x
grad_y = grad_y_weight * self.grad_y
curvature = curvature_weight * self.curvature
return np.append([], [proj_win, grad_x, grad_y, curvature])
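# Illustrative usage sketch (not part of the original file): flattening a SurfaceWindow
# into a single weighted feature vector. The window is assumed to come from the
# surface_information() method above, and the weights are arbitrary example values.
#
# window = contact.surface_information(width=0.02, num_steps=10)
# features = window.asarray(proj_win_weight=1.0, grad_x_weight=0.5,
#                           grad_y_weight=0.5, curvature_weight=0.1)
# # features concatenates the weighted depth window, gradients, and curvature, flattened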

File diff suppressed because it is too large

View File

@@ -0,0 +1,201 @@
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
"""
Configurations for grasp quality computation.
Author: Jeff Mahler
"""
from abc import ABCMeta, abstractmethod
import copy
import itertools as it
import logging
import matplotlib.pyplot as plt
try:
import mayavi.mlab as mlab
except:
# logging.warning('Failed to import mayavi')
pass
import numpy as np
import os
import sys
import time
import IPython
# class GraspQualityConfig(object, metaclass=ABCMeta):
class GraspQualityConfig(object):
"""
Base wrapper class for parameters used in grasp quality computation.
Used to elegantly enforce existence and type of required parameters.
Attributes
----------
config : :obj:`dict`
dictionary mapping parameter names to parameter values
"""
__metaclass__ = ABCMeta
def __init__(self, config):
# check valid config
self.check_valid(config)
# parse config
for key, value in list(config.items()):
setattr(self, key, value)
def contains(self, key):
""" Checks whether or not the key is supported """
if key in list(self.__dict__.keys()):
return True
return False
def __getattr__(self, key):
if self.contains(key):
return object.__getattribute__(self, key)
return None
def __getitem__(self, key):
if self.contains(key):
return object.__getattribute__(self, key)
raise KeyError('Key %s not found' %(key))
def keys(self):
return list(self.__dict__.keys())
@abstractmethod
def check_valid(self, config):
""" Raise an exception if the config is missing required keys """
pass
class QuasiStaticGraspQualityConfig(GraspQualityConfig):
"""
Parameters for quasi-static grasp quality computation.
Attributes
----------
config : :obj:`dict`
dictionary mapping parameter names to parameter values
Notes
-----
Required configuration key-value pairs in Other Parameters.
Other Parameters
----------------
quality_method : :obj:`str`
string name of quasi-static quality metric
friction_coef : float
coefficient of friction at contact point
num_cone_faces : int
number of faces to use in friction cone approximation
soft_fingers : bool
whether to use a soft finger model
quality_type : :obj:`str`
string name of grasp quality type (e.g. quasi-static, robust quasi-static)
check_approach : bool
whether or not to check the approach direction
all_contacts_required : bool
whether or not all contacts must be found to compute quality
"""
REQUIRED_KEYS = ['quality_method',
'friction_coef',
'num_cone_faces',
'soft_fingers',
'quality_type',
'check_approach',
'all_contacts_required']
def __init__(self, config):
GraspQualityConfig.__init__(self, config)
def __copy__(self):
""" Makes a copy of the config """
obj_copy = QuasiStaticGraspQualityConfig(self.__dict__)
return obj_copy
def check_valid(self, config):
for key in QuasiStaticGraspQualityConfig.REQUIRED_KEYS:
if key not in list(config.keys()):
raise ValueError('Invalid configuration. Key %s must be specified' %(key))
class RobustQuasiStaticGraspQualityConfig(GraspQualityConfig):
"""
Parameters for robust quasi-static grasp quality computation.
Attributes
----------
config : :obj:`dict`
dictionary mapping parameter names to parameter values
Notes
-----
Required configuration key-value pairs in Other Parameters.
Other Parameters
----------------
quality_method : :obj:`str`
string name of quasi-static quality metric
friction_coef : float
coefficient of friction at contact point
num_cone_faces : int
number of faces to use in friction cone approximation
soft_fingers : bool
whether to use a soft finger model
quality_type : :obj:`str`
string name of grasp quality type (e.g. quasi-static, robust quasi-static)
check_approach : bool
whether or not to check the approach direction
all_contacts_required : bool
whether or not all contacts must be found to compute quality
num_quality_samples : int
number of samples to use
"""
ROBUST_REQUIRED_KEYS = ['num_quality_samples']
def __init__(self, config):
GraspQualityConfig.__init__(self, config)
def __copy__(self):
""" Makes a copy of the config """
obj_copy = RobustQuasiStaticGraspQualityConfig(self.__dict__)
return obj_copy
def check_valid(self, config):
required_keys = QuasiStaticGraspQualityConfig.REQUIRED_KEYS + \
RobustQuasiStaticGraspQualityConfig.ROBUST_REQUIRED_KEYS
for key in required_keys:
if key not in list(config.keys()):
raise ValueError('Invalid configuration. Key %s must be specified' %(key))
class GraspQualityConfigFactory:
""" Helper class to automatically create grasp quality configurations of different types. """
@staticmethod
def create_config(config):
""" Automatically create a quality config from a dictionary.
Parameters
----------
config : :obj:`dict`
dictionary mapping parameter names to parameter values
"""
if config['quality_type'] == 'quasi_static':
return QuasiStaticGraspQualityConfig(config)
elif config['quality_type'] == 'robust_quasi_static':
return RobustQuasiStaticGraspQualityConfig(config)
else:
raise ValueError('Quality config type %s not supported' %(config['quality_type']))
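# Illustrative usage sketch (not part of the original file): building a quasi-static
# quality configuration from a plain dictionary. Only the key names are taken from
# REQUIRED_KEYS above; the values are arbitrary examples.
#
# config_dict = {
#     'quality_method': 'force_closure',   # example metric name
#     'friction_coef': 0.5,
#     'num_cone_faces': 8,
#     'soft_fingers': True,
#     'quality_type': 'quasi_static',
#     'check_approach': False,
#     'all_contacts_required': True,
# }
# quality_config = GraspQualityConfigFactory.create_config(config_dict)
# assert quality_config.contains('friction_coef') and quality_config['friction_coef'] == 0.5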

View File

@@ -0,0 +1,226 @@
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
"""
User-friendly functions for computing grasp quality metrics.
Author: Jeff Mahler
"""
from abc import ABCMeta, abstractmethod
import copy
import itertools as it
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import scipy.stats
import sys
import time
from .grasp import Grasp
from .graspable_object import GraspableObject
from .grasp_quality_config import GraspQualityConfig
from .robust_grasp_quality import RobustPointGraspMetrics3D
from .random_variables import GraspableObjectPoseGaussianRV, ParallelJawGraspPoseGaussianRV, ParamsGaussianRV
from .quality import PointGraspMetrics3D
from autolab_core import RigidTransform
import IPython
class GraspQualityResult:
""" Stores the results of grasp quality computation.
Attributes
----------
quality : float
value of quality
uncertainty : float
uncertainty estimate of the quality value
quality_config : :obj:`GraspQualityConfig`
"""
def __init__(self, quality, uncertainty=0.0, quality_config=None):
self.quality = quality
self.uncertainty = uncertainty
self.quality_config = quality_config
# class GraspQualityFunction(object, metaclass=ABCMeta):
class GraspQualityFunction(object):
"""
Abstraction for grasp quality functions to make scripts for labeling with quality functions simple and readable.
Attributes
----------
graspable : :obj:`GraspableObject3D`
object to evaluate grasp quality on
quality_config : :obj:`GraspQualityConfig`
set of parameters to evaluate grasp quality
"""
__metaclass__ = ABCMeta
def __init__(self, graspable, quality_config):
# check valid types
if not isinstance(graspable, GraspableObject):
raise ValueError('Must provide GraspableObject')
if not isinstance(quality_config, GraspQualityConfig):
raise ValueError('Must provide GraspQualityConfig')
# set member variables
self.graspable_ = graspable
self.quality_config_ = quality_config
self._setup()
def __call__(self, grasp):
return self.quality(grasp)
@abstractmethod
def _setup(self):
""" Sets up common variables for grasp quality evaluations """
pass
@abstractmethod
def quality(self, grasp):
""" Compute grasp quality.
Parameters
----------
grasp : :obj:`Grasp`
grasp to compute quality for
Returns
-------
:obj:`GraspQualityResult`
result of quality computation
"""
pass
class QuasiStaticQualityFunction(GraspQualityFunction):
""" Grasp quality metric using a quasi-static model.
"""
def __init__(self, graspable, quality_config):
GraspQualityFunction.__init__(self, graspable, quality_config)
@property
def graspable(self):
return self.graspable_
@graspable.setter
def graspable(self, obj):
self.graspable_ = obj
def _setup(self):
if self.quality_config_.quality_type != 'quasi_static':
raise ValueError('Quality configuration must be quasi static')
def quality(self, grasp):
""" Compute grasp quality using a quasistatic method.
Parameters
----------
grasp : :obj:`Grasp`
grasp to compute quality for
Returns
-------
:obj:`GraspQualityResult`
result of quality computation
"""
if not isinstance(grasp, Grasp):
raise ValueError('Must provide Grasp object to compute quality')
quality = PointGraspMetrics3D.grasp_quality(grasp, self.graspable_,
self.quality_config_)
return GraspQualityResult(quality, quality_config=self.quality_config_)
class RobustQuasiStaticQualityFunction(GraspQualityFunction):
""" Grasp quality metric using a robust quasi-static model (average over random perturbations)
"""
def __init__(self, graspable, quality_config, T_obj_world=RigidTransform(from_frame='obj', to_frame='world')):
self.T_obj_world_ = T_obj_world
GraspQualityFunction.__init__(self, graspable, quality_config)
@property
def graspable(self):
return self.graspable_
@graspable.setter
def graspable(self, obj):
self.graspable_ = obj
self._setup()
def _setup(self):
if self.quality_config_.quality_type != 'robust_quasi_static':
raise ValueError('Quality configuration must be robust quasi static')
self.graspable_rv_ = GraspableObjectPoseGaussianRV(self.graspable_,
self.T_obj_world_,
self.quality_config_.obj_uncertainty)
self.params_rv_ = ParamsGaussianRV(self.quality_config_,
self.quality_config_.params_uncertainty)
def quality(self, grasp):
""" Compute grasp quality using a robust quasistatic method.
Parameters
----------
grasp : :obj:`Grasp`
grasp to compute quality for
Returns
-------
:obj:`GraspQualityResult`
result of quality computation
"""
if not isinstance(grasp, Grasp):
raise ValueError('Must provide Grasp object to compute quality')
grasp_rv = ParallelJawGraspPoseGaussianRV(grasp,
self.quality_config_.grasp_uncertainty)
mean_q, std_q = RobustPointGraspMetrics3D.expected_quality(grasp_rv,
self.graspable_rv_,
self.params_rv_,
self.quality_config_)
return GraspQualityResult(mean_q, std_q, quality_config=self.quality_config_)
class GraspQualityFunctionFactory:
@staticmethod
def create_quality_function(graspable, quality_config):
""" Creates a quality function for a particular object based on a configuration, which can be passed directly from a configuration file.
Parameters
----------
graspable : :obj:`GraspableObject3D`
object to create quality function for
quality_config : :obj:`GraspQualityConfig`
parameters for quality function
"""
# check valid types
if not isinstance(graspable, GraspableObject):
raise ValueError('Must provide GraspableObject')
if not isinstance(quality_config, GraspQualityConfig):
raise ValueError('Must provide GraspQualityConfig')
if quality_config.quality_type == 'quasi_static':
return QuasiStaticQualityFunction(graspable, quality_config)
elif quality_config.quality_type == 'robust_quasi_static':
return RobustQuasiStaticQualityFunction(graspable, quality_config)
else:
raise ValueError('Grasp quality type %s not supported' %(quality_config.quality_type))
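# Illustrative usage sketch (not part of the original file): evaluating a grasp with a
# quality function built by the factory. `graspable`, `quality_config`, and `grasp` are
# assumed to be a GraspableObject, a GraspQualityConfig, and a Grasp constructed elsewhere.
#
# quality_fn = GraspQualityFunctionFactory.create_quality_function(graspable, quality_config)
# result = quality_fn(grasp)                  # __call__ forwards to quality()
# print(result.quality, result.uncertainty)   # GraspQualityResult fields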

View File

@@ -0,0 +1,232 @@
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
"""
Encapsulates data and operations on a 2D or 3D object that can be grasped
Author: Jeff Mahler
"""
from abc import ABCMeta, abstractmethod
import copy
import logging
import numpy as np
from .meshpy import mesh as m
from .meshpy import sdf as s
import IPython
import matplotlib.pyplot as plt
from autolab_core import RigidTransform, SimilarityTransform
# class GraspableObject(metaclass=ABCMeta):
class GraspableObject:
""" Encapsulates geometric structures for computing contact in grasping.
Attributes
----------
sdf : :obj:`Sdf3D`
signed distance field, for quickly computing contact points
mesh : :obj:`Mesh3D`
3D triangular mesh to specify object geometry, should match SDF
key : :obj:`str`
object identifier, usually given from the database
model_name : :obj:`str`
name of the object mesh as a .obj file, for use in collision checking
mass : float
mass of the object
convex_pieces : :obj:`list` of :obj:`Mesh3D`
convex decomposition of the object geom for collision checking
"""
__metaclass__ = ABCMeta
def __init__(self, sdf, mesh, key='', model_name='', mass=1.0, convex_pieces=None):
self.sdf_ = sdf
self.mesh_ = mesh
self.key_ = key
self.model_name_ = model_name # for OpenRave usage, gross!
self.mass_ = mass
self.convex_pieces_ = convex_pieces
@property
def sdf(self):
return self.sdf_
@property
def mesh(self):
return self.mesh_
@property
def mass(self):
return self.mass_
@property
def key(self):
return self.key_
@property
def model_name(self):
return self.model_name_
@property
def convex_pieces(self):
return self.convex_pieces_
class GraspableObject3D(GraspableObject):
""" 3D Graspable object for computing contact in grasping.
Attributes
----------
sdf : :obj:`Sdf3D`
signed distance field, for quickly computing contact points
mesh : :obj:`Mesh3D`
3D triangular mesh to specify object geometry, should match SDF
key : :obj:`str`
object identifier, usually given from the database
model_name : :obj:`str`
name of the object mesh as a .obj file, for use in collision checking
mass : float
mass of the object
convex_pieces : :obj:`list` of :obj:`Mesh3D`
convex decomposition of the object geom for collision checking
"""
def __init__(self, sdf, mesh, key='',
model_name='', mass=1.0,
convex_pieces=None):
if not isinstance(sdf, s.Sdf3D):
raise ValueError('Must initialize 3D graspable object with 3D sdf')
if not isinstance(mesh, m.Mesh3D):
raise ValueError('Must initialize 3D graspable object with 3D mesh')
GraspableObject.__init__(self, sdf, mesh, key=key,
model_name=model_name, mass=mass,
convex_pieces=convex_pieces)
def moment_arm(self, x):
""" Computes the moment arm to a point x.
Parameters
----------
x : 3x1 :obj:`numpy.ndarray`
point to get moment arm for
Returns
-------
3x1 :obj:`numpy.ndarray`
"""
return x - self.mesh.center_of_mass
def rescale(self, scale):
""" Rescales uniformly by a given factor.
Parameters
----------
scale : float
the amount to scale the object
Returns
-------
:obj:`GraspableObject3D`
the graspable object rescaled by the given factor
"""
stf = SimilarityTransform(scale=scale)
sdf_rescaled = self.sdf_.rescale(scale)
mesh_rescaled = self.mesh_.transform(stf)
convex_pieces_rescaled = None
if self.convex_pieces_ is not None:
convex_pieces_rescaled = []
for convex_piece in self.convex_pieces_:
convex_piece_rescaled = convex_piece.transform(stf)
convex_pieces_rescaled.append(convex_piece_rescaled)
return GraspableObject3D(sdf_rescaled, mesh_rescaled, key=self.key,
model_name=self.model_name, mass=self.mass,
convex_pieces=convex_pieces_rescaled)
def transform(self, delta_T):
""" Transform by a delta transform.
Parameters
----------
delta_T : :obj:`RigidTransform`
the transformation from the current reference frame to the alternate reference frame
Returns
-------
:obj:`GraspableObject3D`
graspable object transformed by the delta
"""
sdf_tf = self.sdf_.transform(delta_T)
mesh_tf = self.mesh_.transform(delta_T)
convex_pieces_tf = None
if self.convex_pieces_ is not None:
convex_pieces_tf = []
for convex_piece in self.convex_pieces_:
convex_piece_tf = convex_piece.transform(delta_T)
convex_pieces_tf.append(convex_piece_tf)
return GraspableObject3D(sdf_tf, mesh_tf, key=self.key,
model_name=self.model_name, mass=self.mass,
convex_pieces=convex_pieces_tf)
def surface_information(self, grasp, width, num_steps, plot=False, direction1=None, direction2=None):
""" Returns the patches on this object for a given grasp.
Parameters
----------
grasp : :obj:`ParallelJawPtGrasp3D`
grasp to get the patch information for
width : float
width of jaw opening
num_steps : int
number of steps
plot : bool
whether to plot the intermediate computation, for debugging
direction1 : normalized 3x1 :obj:`numpy.ndarray`
direction along which to compute the surface information for the first jaw, if None then defaults to grasp axis
direction2 : normalized 3x1 :obj:`numpy.ndarray`
direction along which to compute the surface information for the second jaw, if None then defaults to grasp axis
Returns
-------
:obj:`list` of :obj:`SurfaceWindow`
surface patches, one for each contact
"""
contacts_found, contacts = grasp.close_fingers(self)#, vis=True)
if not contacts_found:
raise ValueError('Failed to find contacts')
contact1, contact2 = contacts
if plot:
plt.figure()
contact1.plot_friction_cone()
contact2.plot_friction_cone()
ax = plt.gca(projection = '3d')
ax.set_xlim3d(0, self.sdf.dims_[0])
ax.set_ylim3d(0, self.sdf.dims_[1])
ax.set_zlim3d(0, self.sdf.dims_[2])
window1 = contact1.surface_information(width, num_steps, direction=direction1)
window2 = contact2.surface_information(width, num_steps, direction=direction2)
return window1, window2, contact1, contact2
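# Illustrative usage sketch (not part of the original file): typical use of GraspableObject3D.
# `sdf` and `mesh` are assumed to be an Sdf3D and a Mesh3D loaded elsewhere, and `grasp`
# an already-constructed two-finger grasp compatible with close_fingers().
#
# obj = GraspableObject3D(sdf, mesh, key='example_object', mass=0.5)
# obj_scaled = obj.rescale(2.0)   # uniformly rescales both the mesh and the sdf
# w1, w2, c1, c2 = obj.surface_information(grasp, width=0.05, num_steps=10)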

View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017 Berkeley AUTOLAB & University of California, Berkeley
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,26 @@
# try:
# # from meshpy import meshrender
# import meshrender
# except:
# print('Unable to import meshrender shared library! Rendering will not work. Likely due to missing Boost.Numpy')
# print('Boost.Numpy can be installed following the instructions in https://github.com/ndarray/Boost.NumPy')
from .mesh import Mesh3D
# from .image_converter import ImageToMeshConverter
from .obj_file import ObjFile
# from .off_file import OffFile
# from .render_modes import RenderMode
from .sdf import Sdf, Sdf3D
from .sdf_file import SdfFile
from .stable_pose import StablePose
from . import mesh
from . import obj_file
from . import sdf_file
# from .stp_file import StablePoseFile
# from .urdf_writer import UrdfWriter, convex_decomposition
# from .lighting import MaterialProperties, LightingProperties
# from .mesh_renderer import ViewsphereDiscretizer, PlanarWorksurfaceDiscretizer, VirtualCamera, SceneObject
# from .random_variables import CameraSample, RenderSample, UniformViewsphereRandomVariable, \
# UniformPlanarWorksurfaceRandomVariable, UniformPlanarWorksurfaceImageRandomVariable
__all__ = ['Mesh3D','ObjFile','Sdf','Sdf3D','SdfFile','StablePose','mesh','obj_file','sdf_file']
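# Illustrative usage sketch (not part of the original file): loading geometry through the
# exported readers. Paths are placeholders, and SdfFile is assumed to expose a read()
# method analogous to ObjFile.read().
#
# mesh = ObjFile('/path/to/object.obj').read()   # -> Mesh3D
# sdf = SdfFile('/path/to/object.sdf').read()    # -> Sdf3D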

File diff suppressed because it is too large

View File

@@ -0,0 +1,150 @@
"""
File for loading and saving meshes from .OBJ files
Author: Jeff Mahler
"""
import os
try:
from . import mesh
except ImportError:
import mesh
class ObjFile(object):
"""
A Wavefront .obj file reader and writer.
Attributes
----------
filepath : :obj:`str`
The full path to the .obj file associated with this reader/writer.
"""
def __init__(self, filepath):
"""Construct and initialize a .obj file reader and writer.
Parameters
----------
filepath : :obj:`str`
The full path to the desired .obj file
Raises
------
ValueError
If the file extension is not .obj.
"""
self.filepath_ = filepath
file_root, file_ext = os.path.splitext(self.filepath_)
if file_ext != '.obj':
raise ValueError('Extension %s invalid for OBJs' %(file_ext))
@property
def filepath(self):
"""Returns the full path to the .obj file associated with this reader/writer.
Returns
-------
:obj:`str`
The full path to the .obj file associated with this reader/writer.
"""
return self.filepath_
def read(self):
"""Reads in the .obj file and returns a Mesh3D representation of that mesh.
Returns
-------
:obj:`Mesh3D`
A Mesh3D created from the data in the .obj file.
"""
numVerts = 0
verts = []
norms = None
faces = []
tex_coords = []
face_norms = []
f = open(self.filepath_, 'r')
for line in f:
# Break up the line by whitespace
vals = line.split()
if len(vals) > 0:
# Look for obj tags (see http://en.wikipedia.org/wiki/Wavefront_.obj_file)
if vals[0] == 'v':
# Add vertex
v = list(map(float, vals[1:4]))
verts.append(v)
if vals[0] == 'vn':
# Add normal
if norms is None:
norms = []
n = list(map(float, vals[1:4]))
norms.append(n)
if vals[0] == 'f':
# Add faces (includes vertex indices, texture coordinates, and normals)
vi = []
vti = []
nti = []
if vals[1].find('/') == -1:
vi = list(map(int, vals[1:]))
vi = [i - 1 for i in vi]
else:
for j in range(1, len(vals)):
# Break up like by / to read vert inds, tex coords, and normal inds
val = vals[j]
tokens = val.split('/')
for i in range(len(tokens)):
if i == 0:
vi.append(int(tokens[i]) - 1) # adjust for python 0 - indexing
elif i == 1:
if tokens[i] != '':
vti.append(int(tokens[i]))
elif i == 2:
nti.append(int(tokens[i]))
faces.append(vi)
# Below two lists are currently not in use
tex_coords.append(vti)
face_norms.append(nti)
f.close()
return mesh.Mesh3D(verts, faces, norms)
def write(self, mesh):
"""Writes a Mesh3D object out to a .obj file format
Parameters
----------
mesh : :obj:`Mesh3D`
The Mesh3D object to write to the .obj file.
Note
----
Does not support material files or texture coordinates.
"""
f = open(self.filepath_, 'w')
vertices = mesh.vertices
faces = mesh.triangles
normals = mesh.normals
# write human-readable header
f.write('###########################################################\n')
f.write('# OBJ file generated by UC Berkeley Automation Sciences Lab\n')
f.write('#\n')
f.write('# Num Vertices: %d\n' %(vertices.shape[0]))
f.write('# Num Triangles: %d\n' %(faces.shape[0]))
f.write('#\n')
f.write('###########################################################\n')
f.write('\n')
for v in vertices:
f.write('v %f %f %f\n' %(v[0], v[1], v[2]))
# write the normals list
if normals is not None and normals.shape[0] > 0:
for n in normals:
f.write('vn %f %f %f\n' %(n[0], n[1], n[2]))
# write the faces list
for t in faces:
f.write('f %d %d %d\n' %(t[0]+1, t[1]+1, t[2]+1)) # convert back to 1-indexing
f.close()
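# Illustrative usage sketch (not part of the original file): round-tripping a mesh through
# ObjFile. The paths are placeholders.
#
# m3d = ObjFile('/path/to/object.obj').read()    # Mesh3D with vertices, triangles, normals
# ObjFile('/path/to/copy.obj').write(m3d)        # writes v/vn/f records with 1-based indices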

View File

@@ -0,0 +1,773 @@
"""
Definition of SDF Class
Author: Sahaana Suri, Jeff Mahler, and Matt Matl
**Currently assumes clean input**
"""
from abc import ABCMeta, abstractmethod
import logging
import numpy as np
from numbers import Number
import time
from autolab_core import RigidTransform, SimilarityTransform, PointCloud, Point, NormalCloud
from sys import version_info
if version_info[0] != 3:
range = xrange
# class Sdf(metaclass=ABCMeta): # work for python3
class Sdf():
""" Abstract class for signed distance fields.
"""
__metaclass__ = ABCMeta
##################################################################
# General SDF Properties
##################################################################
@property
def dimensions(self):
"""SDF dimension information.
Returns
-------
:obj:`numpy.ndarray` of int
The ndarray that contains the dimensions of the sdf.
"""
return self.dims_
@property
def origin(self):
"""The location of the origin in the SDF grid.
Returns
-------
:obj:`numpy.ndarray` of float
The 2- or 3-ndarray that contains the location of
the origin of the mesh grid in real space.
"""
return self.origin_
@property
def resolution(self):
"""The grid resolution (how wide each grid cell is).
Returns
-------
float
The width of each grid cell.
"""
return self.resolution_
@property
def center(self):
"""Center of grid.
This basically transforms the world frame to grid center.
Returns
-------
:obj:`numpy.ndarray`
"""
return self.center_
@property
def gradients(self):
"""Gradients of the SDF.
Returns
-------
:obj:`list` of :obj:`numpy.ndarray` of float
A list of ndarrays of the same dimension as the SDF. The arrays
are in axis order and specify the gradients for that axis
at each point.
"""
return self.gradients_
@property
def data(self):
"""The SDF data.
Returns
-------
:obj:`numpy.ndarray` of float
The 2- or 3-dimensional ndarray that holds the grid of signed
distances.
"""
return self.data_
##################################################################
# General SDF Abstract Methods
##################################################################
@abstractmethod
def transform(self, tf):
"""Returns a new SDF transformed by similarity tf.
"""
pass
@abstractmethod
def transform_pt_obj_to_grid(self, x_world, direction=False):
"""Transforms points from world frame to grid frame
"""
pass
@abstractmethod
def transform_pt_grid_to_obj(self, x_grid, direction=False):
"""Transforms points from grid frame to world frame
"""
pass
@abstractmethod
def surface_points(self):
"""Returns the points on the surface.
Returns
-------
:obj:`tuple` of :obj:`numpy.ndarray` of int, :obj:`numpy.ndarray` of float
The points on the surface and the signed distances at those points.
"""
pass
@abstractmethod
def __getitem__(self, coords):
"""Returns the signed distance at the given coordinates.
Parameters
----------
coords : :obj:`numpy.ndarray` of int
A 2- or 3-dimensional ndarray that indicates the desired
coordinates in the grid.
Returns
-------
float
The signed distance at the given coords (interpolated).
"""
pass
##################################################################
# Universal SDF Methods
##################################################################
def transform_to_world(self):
"""Returns an sdf object with center in the world frame of reference.
"""
return self.transform(self.pose_, scale=self.scale_)
def center_world(self):
"""Center of grid (basically transforms world frame to grid center)
"""
return self.transform_pt_grid_to_obj(self.center_)
def on_surface(self, coords):
"""Determines whether or not a point is on the object surface.
Parameters
----------
coords : :obj:`numpy.ndarray` of int
A 2- or 3-dimensional ndarray that indicates the desired
coordinates in the grid.
Returns
-------
:obj:`tuple` of bool, float
Is the point on the object's surface, and what
is the signed distance at that point?
"""
sdf_val = self[coords]
if np.abs(sdf_val) < self.surface_thresh_:
return True, sdf_val
return False, sdf_val
def is_out_of_bounds(self, coords):
"""Returns True if coords is an out of bounds access.
Parameters
----------
coords : :obj:`numpy.ndarray` of int
A 2- or 3-dimensional ndarray that indicates the desired
coordinates in the grid.
Returns
-------
bool
Are the coordinates in coords out of bounds?
"""
return np.array(coords < 0).any() or np.array(coords >= self.dims_).any()
def _compute_gradients(self):
"""Computes the gradients of the SDF.
Returns
-------
:obj:`list` of :obj:`numpy.ndarray` of float
A list of ndarrays of the same dimension as the SDF. The arrays
are in axis order and specify the gradients for that axis
at each point.
"""
self.gradients_ = np.gradient(self.data_)
class Sdf3D(Sdf):
# static indexing vars
num_interpolants = 8
min_coords_x = [0, 2, 3, 5]
max_coords_x = [1, 4, 6, 7]
min_coords_y = [0, 1, 3, 6]
max_coords_y = [2, 4, 5, 7]
min_coords_z = [0, 1, 2, 4]
max_coords_z = [3, 5, 6, 7]
def __init__(self, sdf_data, origin, resolution, use_abs=False,
T_sdf_world=RigidTransform(from_frame='sdf', to_frame='world')):
self.data_ = sdf_data
self.origin_ = origin
self.resolution_ = resolution
self.dims_ = self.data_.shape
# set up surface params
self.surface_thresh_ = self.resolution_ * np.sqrt(2) / 2
self.surface_points_ = None
self.surface_points_w_ = None
self.surface_vals_ = None
self._compute_surface_points()
# resolution is max dist from surface when surf is orthogonal to diagonal grid cells
spts, _ = self.surface_points()
self.center_ = 0.5 * (np.min(spts, axis=0) + np.max(spts, axis=0))
self.points_buf_ = np.zeros([Sdf3D.num_interpolants, 3], dtype=int)  # np.int is deprecated in recent numpy
self.coords_buf_ = np.zeros([3, ])
self.pts_ = None
# transform sdf basis to grid (X and Z axes are flipped!)
t_world_grid = self.resolution_ * self.center_
s_world_grid = 1.0 / self.resolution_
# FIXME: since autolab_core==0.0.4 applies the (un)scale transformation before the translation in SimilarityTransform,
# we should use the unscaled origin here to get the correct world coordinates
# PS: in world coordinates, the origin here is the left-bottom-down corner of the padded bounding box
t_grid_sdf = self.origin
self.T_grid_sdf_ = SimilarityTransform(translation=t_grid_sdf,
scale=self.resolution,
from_frame='grid',
to_frame='sdf')
self.T_sdf_world_ = T_sdf_world
self.T_grid_world_ = self.T_sdf_world_ * self.T_grid_sdf_
self.T_sdf_grid_ = self.T_grid_sdf_.inverse()
self.T_world_grid_ = self.T_grid_world_.inverse()
self.T_world_sdf_ = self.T_sdf_world_.inverse()
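# the transforms above chain grid indices to metric sdf coordinates (scale by the grid
# resolution and offset by the origin) and then into the world frame, i.e.
# T_grid_world_ = T_sdf_world_ * T_grid_sdf_; the remaining members are cached inverses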
# optionally use only the absolute values (useful for non-closed meshes in 3D)
self.use_abs_ = use_abs
if use_abs:
self.data_ = np.abs(self.data_)
self._compute_gradients()
self.surface_points_w_ = self.transform_pt_grid_to_obj(self.surface_points_.T).T
surface, _ = self.surface_points(grid_basis=True)
self.surface_for_signed_val = surface[np.random.choice(len(surface), 1000)] # FIXME: for speed
def transform(self, delta_T):
""" Creates a new SDF with a given pose with respect to world coordinates.
Parameters
----------
delta_T : :obj:`autolab_core.RigidTransform`
transform from cur sdf to transformed sdf coords
"""
new_T_sdf_world = self.T_sdf_world_ * delta_T.inverse().as_frames('sdf', 'sdf')
return Sdf3D(self.data_, self.origin_, self.resolution_, use_abs=self.use_abs_,
T_sdf_world=new_T_sdf_world)
def _signed_distance(self, coords):
"""Returns the signed distance at the given coordinates, interpolating
if necessary.
Parameters
----------
coords : :obj:`numpy.ndarray` of int
A 3-dimensional ndarray that indicates the desired
coordinates in the grid.
Returns
-------
float
The signed distance at the given coords (interpolated).
Raises
------
IndexError
If the coords vector does not have three entries.
"""
if len(coords) != 3:
raise IndexError('Indexing must be 3 dimensional')
if self.is_out_of_bounds(coords):
logging.debug('Out of bounds access. Snapping to SDF dims')
# find the closest surface point
surface = self.surface_for_signed_val
closest_surface_coord = surface[np.argmin(np.linalg.norm(surface - coords, axis=-1))]
sd = np.linalg.norm(self.transform_pt_grid_to_obj(closest_surface_coord) -
self.transform_pt_grid_to_obj(coords)) + \
self.data_[closest_surface_coord[0], closest_surface_coord[1], closest_surface_coord[2]]
else:
# snap to grid dims
self.coords_buf_[0] = max(0, min(coords[0], self.dims_[0] - 1))
self.coords_buf_[1] = max(0, min(coords[1], self.dims_[1] - 1))
self.coords_buf_[2] = max(0, min(coords[2], self.dims_[2] - 1))
# regular indexing if integers
if np.issubdtype(type(coords[0]), np.integer) and \
np.issubdtype(type(coords[1]), np.integer) and \
np.issubdtype(type(coords[2]), np.integer):
return self.data_[int(self.coords_buf_[0]), int(self.coords_buf_[1]), int(self.coords_buf_[2])]
# otherwise interpolate
min_coords = np.floor(self.coords_buf_)
max_coords = min_coords + 1 # assumed to be on grid
self.points_buf_[Sdf3D.min_coords_x, 0] = min_coords[0]
self.points_buf_[Sdf3D.max_coords_x, 0] = max_coords[0]
self.points_buf_[Sdf3D.min_coords_y, 1] = min_coords[1]
self.points_buf_[Sdf3D.max_coords_y, 1] = max_coords[1]
self.points_buf_[Sdf3D.min_coords_z, 2] = min_coords[2]
self.points_buf_[Sdf3D.max_coords_z, 2] = max_coords[2]
# trilinearly interpolate over the 8 surrounding lattice points (weight w = prod(1 - |p - x|))
sd = 0.0
for i in range(Sdf3D.num_interpolants):
p = self.points_buf_[i, :]
if self.is_out_of_bounds(p):
v = 0.0
else:
v = self.data_[p[0], p[1], p[2]]
w = np.prod(-np.abs(p - self.coords_buf_) + 1)
sd = sd + w * v
return sd
def __getitem__(self, coords):
"""Returns the signed distance at the given coordinates.
Parameters
----------
coords : :obj:`numpy.ndarray` of int
A 3-dimensional ndarray that indicates the desired
coordinates in the grid.
Returns
-------
float
The signed distance at the given coords (interpolated).
Raises
------
IndexError
If the coords vector does not have three entries.
"""
return self._signed_distance(coords)
def gradient(self, coords):
"""Returns the SDF gradient at the given coordinates, interpolating if necessary
Parameters
----------
coords : :obj:`numpy.ndarray` of int
A 3-dimensional ndarray that indicates the desired
coordinates in the grid.
Returns
-------
float
The gradient at the given coords (interpolated).
Raises
------
IndexError
If the coords vector does not have three entries.
"""
if len(coords) != 3:
raise IndexError('Indexing must be 3 dimensional')
# log warning if out of bounds access
if self.is_out_of_bounds(coords):
logging.debug('Out of bounds access. Snapping to SDF dims')
# snap to grid dims
self.coords_buf_[0] = max(0, min(coords[0], self.dims_[0] - 1))
self.coords_buf_[1] = max(0, min(coords[1], self.dims_[1] - 1))
self.coords_buf_[2] = max(0, min(coords[2], self.dims_[2] - 1))
# regular indexing if integers
if type(coords[0]) is int and type(coords[1]) is int and type(coords[2]) is int:
self.coords_buf_ = self.coords_buf_.astype(int)  # np.int is deprecated in recent numpy
return self.data_[self.coords_buf_[0], self.coords_buf_[1], self.coords_buf_[2]]
# otherwise interpolate
min_coords = np.floor(self.coords_buf_)
max_coords = min_coords + 1
self.points_buf_[Sdf3D.min_coords_x, 0] = min_coords[0]
self.points_buf_[Sdf3D.max_coords_x, 0] = max_coords[0]
self.points_buf_[Sdf3D.min_coords_y, 1] = min_coords[1]
self.points_buf_[Sdf3D.max_coords_y, 1] = max_coords[1]
self.points_buf_[Sdf3D.min_coords_z, 2] = min_coords[2]
self.points_buf_[Sdf3D.max_coords_z, 2] = max_coords[2]
# trilinear interpolation of the gradient over the 8 surrounding lattice points
g = np.zeros(3)
gp = np.zeros(3)
w_sum = 0.0
for i in range(Sdf3D.num_interpolants):
p = self.points_buf_[i, :]
if self.is_out_of_bounds(p):
gp[0] = 0.0
gp[1] = 0.0
gp[2] = 0.0
else:
gp[0] = self.gradients_[0][p[0], p[1], p[2]]
gp[1] = self.gradients_[1][p[0], p[1], p[2]]
gp[2] = self.gradients_[2][p[0], p[1], p[2]]
w = np.prod(-np.abs(p - self.coords_buf_) + 1)
g = g + w * gp
return g
def curvature(self, coords, delta=0.001):
"""
Returns an approximation to the local SDF curvature (Hessian) at the
given coordinate in grid basis.
Parameters
----------
coords : numpy 3-vector
the grid coordinates at which to get the curvature
delta : float
step size used to perturb the coordinates for the finite-difference estimate
Returns
-------
curvature : 3x3 ndarray of the curvature at the surface points
"""
# perturb local coords
coords_x_up = coords + np.array([delta, 0, 0])
coords_x_down = coords + np.array([-delta, 0, 0])
coords_y_up = coords + np.array([0, delta, 0])
coords_y_down = coords + np.array([0, -delta, 0])
coords_z_up = coords + np.array([0, 0, delta])
coords_z_down = coords + np.array([0, 0, -delta])
# get gradient
grad_x_up = self.gradient(coords_x_up)
grad_x_down = self.gradient(coords_x_down)
grad_y_up = self.gradient(coords_y_up)
grad_y_down = self.gradient(coords_y_down)
grad_z_up = self.gradient(coords_z_up)
grad_z_down = self.gradient(coords_z_down)
# finite differences
curvature_x = (grad_x_up - grad_x_down) / (4 * delta)
curvature_y = (grad_y_up - grad_y_down) / (4 * delta)
curvature_z = (grad_z_up - grad_z_down) / (4 * delta)
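# each column above is a central difference over 2*delta with an extra factor of 2,
# so adding the transpose below yields the symmetrized Hessian (H + H^T) / 2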
curvature = np.c_[curvature_x, np.c_[curvature_y, curvature_z]]
curvature = curvature + curvature.T
return curvature
def surface_normal(self, coords, delta=1.5):
"""Returns the sdf surface normal at the given coordinates by
computing the tangent plane using SDF interpolation.
Parameters
----------
coords : :obj:`numpy.ndarray` of int
A 3-dimensional ndarray that indicates the desired
coordinates in the grid.
delta : float
A radius for collecting surface points near the target coords
for calculating the surface normal.
Returns
-------
:obj:`numpy.ndarray` of float
The 3-dimensional ndarray that represents the surface normal.
Raises
------
IndexError
If the coords vector does not have three entries.
"""
if len(coords) != 3:
raise IndexError('Indexing must be 3 dimensional')
# log warning if out of bounds access
if self.is_out_of_bounds(coords):
logging.debug('Out of bounds access. Snapping to SDF dims')
# snap to grid dims
# coords[0] = max(0, min(coords[0], self.dims_[0] - 1))
# coords[1] = max(0, min(coords[1], self.dims_[1] - 1))
# coords[2] = max(0, min(coords[2], self.dims_[2] - 1))
index_coords = np.zeros(3)
# check points on surface
sdf_val = self[coords]
if np.abs(sdf_val) >= self.surface_thresh_:
logging.debug('Cannot compute normal. Point must be on surface')
return None
# collect all surface points within the delta sphere
X = []
d = np.zeros(3)
dx = -delta
while dx <= delta:
dy = -delta
while dy <= delta:
dz = -delta
while dz <= delta:
d = np.array([dx, dy, dz])
if dx != 0 or dy != 0 or dz != 0:
d = delta * d / np.linalg.norm(d)
index_coords[0] = coords[0] + d[0]
index_coords[1] = coords[1] + d[1]
index_coords[2] = coords[2] + d[2]
sdf_val = self[index_coords]
if np.abs(sdf_val) < self.surface_thresh_:
X.append([index_coords[0], index_coords[1], index_coords[2], sdf_val])
dz += delta
dy += delta
dx += delta
# fit a plane to the surface points
X.sort(key=lambda x: x[3])
X = np.array(X)[:, :3]
A = X - np.mean(X, axis=0)
try:
U, S, V = np.linalg.svd(A.T)
n = U[:, 2]
except:
logging.warning('Tangent plane does not exist. Returning None.')
return None
# make sure surface normal is outward
# referenced from Zhou Xian's github, but if the model is not watertight, this method may fail
# https://github.com/zhouxian/meshpy_berkeley/commit/96428f3b7af618a0828a7eb88f22541cdafacfc7
if self[coords + n * 0.01] < self[coords]:
n = -n
return n
def _compute_surface_points(self):
surface_points = np.where(np.abs(self.data_) < self.surface_thresh_)
x = surface_points[0]
y = surface_points[1]
z = surface_points[2]
self.surface_points_ = np.c_[x, np.c_[y, z]]
self.surface_vals_ = self.data_[self.surface_points_[:, 0], self.surface_points_[:, 1],
self.surface_points_[:, 2]]
def surface_points(self, grid_basis=True):
"""Returns the points on the surface.
Parameters
----------
grid_basis : bool
If False, the surface points are transformed to the world frame.
If True (default), the surface points are left in grid coordinates.
Returns
-------
:obj:`tuple` of :obj:`numpy.ndarray` of int, :obj:`numpy.ndarray` of float
The points on the surface and the signed distances at those points.
"""
if not grid_basis:
return self.surface_points_w_, self.surface_vals_
return self.surface_points_, self.surface_vals_
def rescale(self, scale):
""" Rescale an SDF by a given scale factor.
Parameters
----------
scale : float
the amount to scale the SDF
Returns
-------
:obj:`Sdf3D`
new sdf with given scale
"""
resolution_tf = scale * self.resolution_
return Sdf3D(self.data_, self.origin_, resolution_tf, use_abs=self.use_abs_,
T_sdf_world=self.T_sdf_world_)
def transform_dense(self, delta_T, detailed=False):
""" Transform the grid by pose T and scale with canonical reference
frame at the SDF center with axis alignment.
Parameters
----------
delta_T : SimilarityTransform
the transformation from the current frame of reference to the new frame of reference
detailed : bool
whether or not to use interpolation
Returns
-------
:obj:`Sdf3D`
new sdf with grid warped by T
"""
# map all surface points to their new location
start_t = time.clock()
# form points array
if self.pts_ is None:
[x_ind, y_ind, z_ind] = np.indices(self.dims_)
self.pts_ = np.c_[x_ind.flatten().T, np.c_[y_ind.flatten().T, z_ind.flatten().T]].astype(np.float32)
# transform points
num_pts = self.pts_.shape[0]
pts_sdf = self.T_grid_sdf_ * PointCloud(self.pts_.T, frame='grid')
pts_sdf_tf = delta_T.as_frames('sdf', 'sdf') * pts_sdf
pts_grid_tf = self.T_sdf_grid_ * pts_sdf_tf
pts_tf = pts_grid_tf.data.T
all_points_t = time.clock()
# transform the center
origin_sdf = self.T_grid_sdf_ * Point(self.origin_, frame='grid')
origin_sdf_tf = delta_T.as_frames('sdf', 'sdf') * origin_sdf
origin_tf = self.T_sdf_grid_ * origin_sdf_tf
origin_tf = origin_tf.data
# use same resolution (since indices will be rescaled)
resolution_tf = self.resolution_
origin_res_t = time.clock()
# add each point to the new pose
if detailed:
sdf_data_tf = np.zeros([num_pts, 1])
for i in range(num_pts):
sdf_data_tf[i] = self[pts_tf[i, 0], pts_tf[i, 1], pts_tf[i, 2]]
else:
pts_tf_round = np.round(pts_tf).astype(np.int64)
# snap to closest boundary
pts_tf_round[:, 0] = np.max(np.c_[np.zeros([num_pts, 1]), pts_tf_round[:, 0]], axis=1)
pts_tf_round[:, 0] = np.min(np.c_[(self.dims_[0] - 1) * np.ones([num_pts, 1]), pts_tf_round[:, 0]], axis=1)
pts_tf_round[:, 1] = np.max(np.c_[np.zeros([num_pts, 1]), pts_tf_round[:, 1]], axis=1)
pts_tf_round[:, 1] = np.min(np.c_[(self.dims_[1] - 1) * np.ones([num_pts, 1]), pts_tf_round[:, 1]], axis=1)
pts_tf_round[:, 2] = np.max(np.c_[np.zeros([num_pts, 1]), pts_tf_round[:, 2]], axis=1)
pts_tf_round[:, 2] = np.min(np.c_[(self.dims_[2] - 1) * np.ones([num_pts, 1]), pts_tf_round[:, 2]], axis=1)
sdf_data_tf = self.data_[pts_tf_round[:, 0], pts_tf_round[:, 1], pts_tf_round[:, 2]]
sdf_data_tf_grid = sdf_data_tf.reshape(self.dims_)
tf_t = time.time()
logging.debug('Sdf3D: Time to transform coords: %f' % (all_points_t - start_t))
logging.debug('Sdf3D: Time to transform origin: %f' % (origin_res_t - all_points_t))
logging.debug('Sdf3D: Time to transfer sd: %f' % (tf_t - origin_res_t))
return Sdf3D(sdf_data_tf_grid, origin_tf, resolution_tf, use_abs=self.use_abs_, T_sdf_world=self.T_sdf_world_)
def transform_pt_obj_to_grid(self, x_sdf, direction=False):
""" Converts a point in sdf coords to the grid basis. If direction then don't translate.
Parameters
----------
x_sdf : numpy 3xN ndarray or numeric scalar
points to transform from sdf basis in meters to grid basis
direction : bool
Returns
-------
x_grid : numpy 3xN ndarray or scalar
points in grid basis
"""
if isinstance(x_sdf, Number):
return self.T_world_grid_.scale * x_sdf
if direction:
points_sdf = NormalCloud(x_sdf.astype(np.float32), frame='world')
else:
points_sdf = PointCloud(x_sdf.astype(np.float32), frame='world')
x_grid = self.T_world_grid_ * points_sdf
return x_grid.data
def transform_pt_grid_to_obj(self, x_grid, direction=False):
""" Converts a point in grid coords to the world basis. If direction then don't translate.
Parameters
----------
x_grid : numpy 3xN ndarray or numeric scalar
points to transform from grid basis to sdf basis in meters
direction : bool
Returns
-------
x_sdf : numpy 3xN ndarray
points in sdf basis (meters)
"""
if isinstance(x_grid, Number):
return self.T_grid_world_.scale * x_grid
if direction:
points_grid = NormalCloud(x_grid.astype(np.float32), frame='grid')
else:
points_grid = PointCloud(x_grid.astype(np.float32), frame='grid')
x_sdf = self.T_grid_world_ * points_grid
return x_sdf.data
@staticmethod
def find_zero_crossing_linear(x1, y1, x2, y2):
""" Find zero crossing using linear approximation"""
# NOTE: use sparingly, approximations can be shoddy
d = x2 - x1
t1 = 0
t2 = np.linalg.norm(d)
v = d / t2
m = (y2 - y1) / (t2 - t1)
b = y1
t_zc = -b / m
x_zc = x1 + t_zc * v
return x_zc
@staticmethod
def find_zero_crossing_quadratic(x1, y1, x2, y2, x3, y3, eps=1.0):
""" Find zero crossing using quadratic approximation along 1d line"""
# compute coords along 1d line
v = x2 - x1
v = v / np.linalg.norm(v)
if v[v != 0].shape[0] == 0:
logging.error('Difference is 0. Probably a bug')
t1 = 0
t2 = (x2 - x1)[v != 0] / v[v != 0]
t2 = t2[0]
t3 = (x3 - x1)[v != 0] / v[v != 0]
t3 = t3[0]
# solve for quad approx
x1_row = np.array([t1 ** 2, t1, 1])
x2_row = np.array([t2 ** 2, t2, 1])
x3_row = np.array([t3 ** 2, t3, 1])
X = np.array([x1_row, x2_row, x3_row])
y_vec = np.array([y1, y2, y3])
try:
w = np.linalg.solve(X, y_vec)
except np.linalg.LinAlgError:
logging.error('Singular matrix. Probably a bug')
return None
# get positive roots
possible_t = np.roots(w)
t_zc = None
for i in range(possible_t.shape[0]):
if 0 <= possible_t[i] <= 10 and not np.iscomplex(possible_t[i]):
t_zc = possible_t[i]
# if no positive roots find min
if np.abs(w[0]) < 1e-10:
return None
if t_zc is None:
t_zc = -w[1] / (2 * w[0])
if t_zc < -eps or t_zc > eps:
return None
x_zc = x1 + t_zc * v
return x_zc
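# Minimal usage sketch of the quadratic zero-crossing helper (illustrative values;
# assumes numpy is already imported as np at the top of this module, as the code above does).
# Three samples of the signed distance y(t) = t^2 + 0.7*t - 0.3 are taken along the x axis;
# the recovered crossing lies at t = 0.3, i.e. at the point [0.3, 0, 0].
if __name__ == '__main__':
    x1, x2, x3 = np.array([0., 0., 0.]), np.array([0.5, 0., 0.]), np.array([1., 0., 0.])
    y1, y2, y3 = -0.3, 0.3, 1.4
    crossing = Sdf3D.find_zero_crossing_quadratic(x1, y1, x2, y2, x3, y3)
    print(crossing)  # approximately [0.3, 0., 0.]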

View File

@@ -0,0 +1,127 @@
'''
Reads and writes sdfs to file
Author: Jeff Mahler
'''
import numpy as np
import os
from . import sdf
class SdfFile:
"""
A Signed Distance Field .sdf file reader and writer.
Attributes
----------
filepath : :obj:`str`
The full path to the .sdf or .csv file associated with this reader/writer.
"""
def __init__(self, filepath):
"""Construct and initialize a .sdf file reader and writer.
Parameters
----------
filepath : :obj:`str`
The full path to the desired .sdf or .csv file
Raises
------
ValueError
If the file extension is not .sdf or .csv.
"""
self.filepath_ = filepath
file_root, file_ext = os.path.splitext(self.filepath_)
if file_ext == '.sdf':
self.use_3d_ = True
elif file_ext == '.csv':
self.use_3d_ = False
else:
raise ValueError('Extension %s invalid for SDFs' %(file_ext))
@property
def filepath(self):
"""Returns the full path to the file associated with this reader/writer.
Returns
-------
:obj:`str`
The full path to the file associated with this reader/writer.
"""
return self.filepath_
def read(self):
"""Reads in the SDF file and returns a Sdf object.
Returns
-------
:obj:`Sdf`
A Sdf created from the data in the file.
"""
if self.use_3d_:
return self._read_3d()
else:
return self._read_2d()
def _read_3d(self):
"""Reads in a 3D SDF file and returns a Sdf object.
Returns
-------
:obj:`Sdf3D`
An Sdf3D created from the data in the file.
"""
if not os.path.exists(self.filepath_):
return None
my_file = open(self.filepath_, 'r')
nx, ny, nz = [int(i) for i in my_file.readline().split()] #dimension of each axis should all be equal for LSH
ox, oy, oz = [float(i) for i in my_file.readline().split()] #shape origin
dims = np.array([nx, ny, nz])
origin = np.array([ox, oy, oz])
resolution = float(my_file.readline()) # resolution of the grid cells in original mesh coords
sdf_data = np.zeros(dims)
# loop through file, getting each value
count = 0
for k in range(nz):
for j in range(ny):
for i in range(nx):
sdf_data[i][j][k] = float(my_file.readline())
count += 1
my_file.close()
return sdf.Sdf3D(sdf_data, origin, resolution)
def _read_2d(self):
"""Reads in a 2D SDF file and returns a Sdf object.
Returns
-------
:obj:`Sdf2D`
An Sdf2D created from the data in the file.
"""
if not os.path.exists(self.filepath_):
return None
sdf_data = np.loadtxt(self.filepath_, delimiter=',')
return sdf.Sdf2D(sdf_data)
def write(self, sdf):
"""Writes an SDF to a file.
Parameters
----------
sdf : :obj:`Sdf`
An Sdf object to write out.
Note
----
This is not currently implemented or supported.
"""
pass
if __name__ == '__main__':
pass
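# Minimal usage sketch (the file path below is hypothetical):
#
#   sdf = SdfFile('/path/to/model.sdf').read()   # returns an Sdf3D instance
#   points, vals = sdf.surface_points()          # grid-frame surface voxels and their signed distances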

View File

@@ -0,0 +1,86 @@
"""
A basic struct-like Stable Pose class to make accessing pose probability and rotation matrix easier
Author: Matt Matl and Nikhil Sharma
"""
import numpy as np
from autolab_core import RigidTransform
d_theta = np.deg2rad(1)
class StablePose(object):
"""A representation of a mesh's stable pose.
Attributes
----------
p : float
Probability associated with this stable pose.
r : :obj:`numpy.ndarray` of :obj`numpy.ndarray` of float
3x3 rotation matrix that rotates the mesh into the stable pose from
standardized coordinates.
x0 : :obj:`numpy.ndarray` of float
3D point in the mesh that is resting on the table.
face : :obj:`numpy.ndarray`
3D vector of indices corresponding to vertices forming the resting face
stp_id : :obj:`str`
A string identifier for the stable pose
T_obj_table : :obj:`RigidTransform`
A RigidTransform representation of the pose's rotation matrix.
"""
def __init__(self, p, r, x0, face=None, stp_id=-1):
"""Create a new stable pose object.
Parameters
----------
p : float
Probability associated with this stable pose.
r : :obj:`numpy.ndarray` of :obj`numpy.ndarray` of float
3x3 rotation matrix that rotates the mesh into the stable pose from
standardized coordinates.
x0 : :obj:`numpy.ndarray` of float
3D point in the mesh that is resting on the table.
face : :obj:`numpy.ndarray`
3D vector of indices corresponding to vertices forming the resting face
stp_id : :obj:`str`
A string identifier for the stable pose
"""
self.p = p
self.r = r
self.x0 = x0
self.face = face
self.id = stp_id
# fix stable pose bug
if np.abs(np.linalg.det(self.r) + 1) < 0.01:
self.r[1,:] = -self.r[1,:]
def __eq__(self, other):
""" Check equivalence by rotation about the z axis """
if not isinstance(other, StablePose):
raise ValueError('Can only compare stable pose objects')
R0 = self.r
R1 = other.r
dR = R1.T.dot(R0)
theta = 0
while theta < 2 * np.pi:
Rz = RigidTransform.z_axis_rotation(theta)
dR = R1.T.dot(Rz).dot(R0)
if np.linalg.norm(dR - np.eye(3)) < 1e-5:
return True
theta += d_theta
return False
@property
def T_obj_table(self):
return RigidTransform(rotation=self.r, from_frame='obj', to_frame='table')
@property
def T_obj_world(self):
T_world_obj = RigidTransform(rotation=self.r.T,
translation=self.x0,
from_frame='world',
to_frame='obj')
return T_world_obj.inverse()
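# Minimal usage sketch (illustrative values): an identity stable pose resting at the
# origin, compared against itself (equality is tested up to a rotation about the z axis).
if __name__ == '__main__':
    pose = StablePose(p=0.5, r=np.eye(3), x0=np.zeros(3))
    print(pose.T_obj_table)   # RigidTransform from 'obj' to 'table'
    print(pose == pose)       # True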

View File

@@ -0,0 +1,819 @@
# -*- coding: utf-8 -*-
# """
# Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
# Permission to use, copy, modify, and distribute this software and its documentation for educational,
# research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
# hereby granted, provided that the above copyright notice, this paragraph and the following two
# paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
# Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
# 7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
#
# IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
# INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
# THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
# HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
# """
# """
# Quasi-static point-based grasp quality metrics.
# Author: Jeff Mahler and Brian Hou
# """
import logging
# logging.root.setLevel(level=logging.DEBUG)
import numpy as np
try:
import pyhull.convex_hull as cvh
except:
# logging.warning('Failed to import pyhull')
pass
try:
import cvxopt as cvx
except:
# logging.warning('Failed to import cvx')
pass
import os
import scipy.spatial as ss
import sys
import time
from .grasp import PointGrasp
from .graspable_object import GraspableObject3D
from .grasp_quality_config import GraspQualityConfig
from .meshpy import mesh as m
from .meshpy import sdf as s
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import IPython
# turn off output logging
cvx.solvers.options['show_progress'] = False
class PointGraspMetrics3D:
""" Class to wrap functions for quasistatic point grasp quality metrics.
"""
@staticmethod
def grasp_quality(grasp, obj, params, contacts=None, vis=False):
"""
Computes the quality of a two-finger point grasps on a given object using a quasi-static model.
Parameters
----------
grasp : :obj:`ParallelJawPtGrasp3D`
grasp to evaluate
obj : :obj:`GraspableObject3D`
object to evaluate quality on
params : :obj:`GraspQualityConfig`
parameters of grasp quality function
"""
start = time.time()
if not isinstance(grasp, PointGrasp):
raise ValueError('Must provide a point grasp object')
if not isinstance(obj, GraspableObject3D):
raise ValueError('Must provide a 3D graspable object')
if not isinstance(params, GraspQualityConfig):
raise ValueError('Must provide GraspQualityConfig')
# read in params
method = params.quality_method
friction_coef = params.friction_coef
num_cone_faces = params.num_cone_faces
soft_fingers = params.soft_fingers
check_approach = params.check_approach
if not hasattr(PointGraspMetrics3D, method):
raise ValueError('Illegal point grasp metric %s specified' % (method))
# get point grasp contacts
contacts_start = time.time()
if contacts is None:
contacts_found, contacts = grasp.close_fingers(obj, check_approach=check_approach, vis=vis)
if not contacts_found:
logging.debug('Contacts not found')
return 0
if method == 'force_closure':
# Use fast force closure test (Nguyen 1988) if possible.
if len(contacts) == 2:
c1, c2 = contacts
return PointGraspMetrics3D.force_closure(c1, c2, friction_coef)
# Default to QP force closure test.
method = 'force_closure_qp'
# add the forces, torques, etc at each contact point
forces_start = time.time()
num_contacts = len(contacts)
forces = np.zeros([3, 0])
torques = np.zeros([3, 0])
normals = np.zeros([3, 0])
for i in range(num_contacts):
contact = contacts[i]
if vis:
if i == 0:
contact.plot_friction_cone(color='y')
else:
contact.plot_friction_cone(color='c')
# get contact forces
force_success, contact_forces, contact_outward_normal = contact.friction_cone(num_cone_faces, friction_coef)
if not force_success:
print('Force computation failed')
logging.debug('Force computation failed')
if params.all_contacts_required:
return 0
# get contact torques
torque_success, contact_torques = contact.torques(contact_forces)
if not torque_success:
print('Torque computation failed')
logging.debug('Torque computation failed')
if params.all_contacts_required:
return 0
# get the magnitude of the normal force that the contacts could apply
n = contact.normal_force_magnitude()
forces = np.c_[forces, n * contact_forces]
torques = np.c_[torques, n * contact_torques]
normals = np.c_[normals, n * -contact_outward_normal] # store inward pointing normals
if normals.shape[1] == 0:
logging.debug('No normals')
print('No normals')
return 0
# normalize torques
if 'torque_scaling' not in list(params.keys()):
torque_scaling = 1.0
if method == 'ferrari_canny_L1':
mn, mx = obj.mesh.bounding_box()
torque_scaling = 1.0 / np.median(mx)
print("torque scaling", torque_scaling)
params.torque_scaling = torque_scaling
if vis:
ax = plt.gca()
ax.set_xlim3d(0, obj.sdf.dims_[0])
ax.set_ylim3d(0, obj.sdf.dims_[1])
ax.set_zlim3d(0, obj.sdf.dims_[2])
plt.show()
# evaluate the desired quality metric
quality_start = time.time()
Q_func = getattr(PointGraspMetrics3D, method)
quality = Q_func(forces, torques, normals,
soft_fingers=soft_fingers,
params=params)
end = time.time()
logging.debug('Contacts took %.3f sec' % (forces_start - contacts_start))
logging.debug('Forces took %.3f sec' % (quality_start - forces_start))
logging.debug('Quality eval took %.3f sec' % (end - quality_start))
logging.debug('Everything took %.3f sec' % (end - start))
return quality
@staticmethod
def grasp_matrix(forces, torques, normals, soft_fingers=False,
finger_radius=0.005, params=None):
""" Computes the grasp map between contact forces and wrenchs on the object in its reference frame.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
finger_radius : float
the radius of the fingers to use
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
G : 6xM :obj:`numpy.ndarray`
grasp map
"""
if params is not None and 'finger_radius' in list(params.keys()):
finger_radius = params.finger_radius
num_forces = forces.shape[1]
num_torques = torques.shape[1]
if num_forces != num_torques:
raise ValueError('Need same number of forces and torques')
num_cols = num_forces
if soft_fingers:
num_normals = 2
if normals.ndim > 1:
num_normals = 2 * normals.shape[1]
num_cols = num_cols + num_normals
G = np.zeros([6, num_cols])
for i in range(num_forces):
G[:3, i] = forces[:, i]
# print("liang", params.torque_scaling)
G[3:, i] = params.torque_scaling * torques[:, i]
if soft_fingers:
torsion = np.pi * finger_radius ** 2 * params.friction_coef * normals * params.torque_scaling
pos_normal_i = int(-num_normals)
neg_normal_i = int(-num_normals + num_normals / 2)
G[3:, pos_normal_i:neg_normal_i] = torsion
G[3:, neg_normal_i:] = -torsion
return G
@staticmethod
def force_closure(c1, c2, friction_coef, use_abs_value=True):
"""" Checks force closure using the antipodality trick.
Parameters
----------
c1 : :obj:`Contact3D`
first contact point
c2 : :obj:`Contact3D`
second contact point
friction_coef : float
coefficient of friction at the contact point
use_abs_value : bool
whether or not to use the directionality of the surface normal (useful when the mesh is not oriented)
Returns
-------
int : 1 if in force closure, 0 otherwise
"""
if c1.point is None or c2.point is None or c1.normal is None or c2.normal is None:
return 0
p1, p2 = c1.point, c2.point
n1, n2 = -c1.normal, -c2.normal # inward facing normals
if (p1 == p2).all(): # same point
return 0
for normal, contact, other_contact in [(n1, p1, p2), (n2, p2, p1)]:
diff = other_contact - contact
normal_proj = normal.dot(diff) / np.linalg.norm(normal)
if use_abs_value:
normal_proj = abs(normal.dot(diff)) / np.linalg.norm(normal)
if normal_proj < 0:
return 0 # wrong side
alpha = np.arccos(normal_proj / np.linalg.norm(diff))
if alpha > np.arctan(friction_coef):
return 0 # outside of friction cone
return 1
@staticmethod
def force_closure_qp(forces, torques, normals, soft_fingers=False,
wrench_norm_thresh=1e-3, wrench_regularizer=1e-10,
params=None):
""" Checks force closure by solving a quadratic program (whether or not zero is in the convex hull)
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
int : 1 if in force closure, 0 otherwise
"""
if params is not None:
if 'wrench_norm_thresh' in list(params.keys()):
wrench_norm_thresh = params.wrench_norm_thresh
if 'wrench_regularizer' in list(params.keys()):
wrench_regularizer = params.wrench_regularizer
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals, soft_fingers, params=params)
min_norm, _ = PointGraspMetrics3D.min_norm_vector_in_facet(G, wrench_regularizer=wrench_regularizer)
return 1 * (min_norm < wrench_norm_thresh) # if greater than wrench_norm_thresh, 0 is outside of hull
@staticmethod
def partial_closure(forces, torques, normals, soft_fingers=False,
wrench_norm_thresh=1e-3, wrench_regularizer=1e-10,
params=None):
""" Evalutes partial closure: whether or not the forces and torques can resist a specific wrench.
Estimates resistance by sollving a quadratic program (whether or not the target wrench is in the convex hull).
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
int : 1 if in partial closure, 0 otherwise
"""
force_limit = None
if params is None:
return 0
force_limit = params.force_limits
target_wrench = params.target_wrench
if 'wrench_norm_thresh' in list(params.keys()):
wrench_norm_thresh = params.wrench_norm_thresh
if 'wrench_regularizer' in list(params.keys()):
wrench_regularizer = params.wrench_regularizer
# reorganize the grasp matrix for easier constraint enforcement in optimization
num_fingers = normals.shape[1]
num_wrenches_per_finger = forces.shape[1] // num_fingers
G = np.zeros([6, 0])
for i in range(num_fingers):
start_i = num_wrenches_per_finger * i
end_i = num_wrenches_per_finger * (i + 1)
G_i = PointGraspMetrics3D.grasp_matrix(forces[:, start_i:end_i], torques[:, start_i:end_i],
normals[:, i:i + 1],
soft_fingers, params=params)
G = np.c_[G, G_i]
wrench_resisted, _ = PointGraspMetrics3D.wrench_in_positive_span(G, target_wrench, force_limit, num_fingers,
wrench_norm_thresh=wrench_norm_thresh,
wrench_regularizer=wrench_regularizer)
return 1 * wrench_resisted
@staticmethod
def wrench_resistance(forces, torques, normals, soft_fingers=False,
wrench_norm_thresh=1e-3, wrench_regularizer=1e-10,
finger_force_eps=1e-9, params=None):
""" Evalutes wrench resistance: the inverse norm of the contact forces required to resist a target wrench
Estimates resistance by sollving a quadratic program (min normal contact forces to produce a wrench).
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
finger_force_eps : float
small float to prevent numeric issues in wrench resistance metric
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
float : value of wrench resistance metric
"""
force_limit = None
if params is None:
return 0
force_limit = params.force_limits
target_wrench = params.target_wrench
if 'wrench_norm_thresh' in list(params.keys()):
wrench_norm_thresh = params.wrench_norm_thresh
if 'wrench_regularizer' in list(params.keys()):
wrench_regularizer = params.wrench_regularizer
if 'finger_force_eps' in list(params.keys()):
finger_force_eps = params.finger_force_eps
# reorganize the grasp matrix for easier constraint enforcement in optimization
num_fingers = normals.shape[1]
num_wrenches_per_finger = forces.shape[1] // num_fingers
G = np.zeros([6, 0])
for i in range(num_fingers):
start_i = num_wrenches_per_finger * i
end_i = num_wrenches_per_finger * (i + 1)
G_i = PointGraspMetrics3D.grasp_matrix(forces[:, start_i:end_i], torques[:, start_i:end_i],
normals[:, i:i + 1],
soft_fingers, params=params)
G = np.c_[G, G_i]
# compute metric from finger force norm
Q = 0
wrench_resisted, finger_force_norm = PointGraspMetrics3D.wrench_in_positive_span(G, target_wrench, force_limit,
num_fingers,
wrench_norm_thresh=wrench_norm_thresh,
wrench_regularizer=wrench_regularizer)
if wrench_resisted:
Q = 1.0 / (finger_force_norm + finger_force_eps) - 1.0 / (2 * force_limit)
return Q
@staticmethod
def min_singular(forces, torques, normals, soft_fingers=False, params=None):
""" Min singular value of grasp matrix - measure of wrench that grasp is "weakest" at resisting.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
float : value of smallest singular value
"""
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals, soft_fingers)
_, S, _ = np.linalg.svd(G)
min_sig = S[5]
return min_sig
@staticmethod
def wrench_volume(forces, torques, normals, soft_fingers=False, params=None):
""" Volume of grasp matrix singular values - score of all wrenches that the grasp can resist.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
float : value of wrench volume
"""
k = 1
if params is not None and 'k' in list(params.keys()):
k = params.k
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals, soft_fingers)
_, S, _ = np.linalg.svd(G)
sig = S
return k * np.sqrt(np.prod(sig))
@staticmethod
def grasp_isotropy(forces, torques, normals, soft_fingers=False, params=None):
""" Condition number of grasp matrix - ratio of "weakest" wrench that the grasp can exert to the "strongest" one.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
Returns
-------
float : value of grasp isotropy metric
"""
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals, soft_fingers)
_, S, _ = np.linalg.svd(G)
max_sig = S[0]
min_sig = S[5]
isotropy = min_sig / max_sig
if np.isnan(isotropy) or np.isinf(isotropy):
return 0
return isotropy
@staticmethod
def ferrari_canny_L1(forces, torques, normals, soft_fingers=False, params=None,
wrench_norm_thresh=1e-3,
wrench_regularizer=1e-10):
""" Ferrari & Canny's L1 metric. Also known as the epsilon metric.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
Returns
-------
float : value of metric
"""
if params is not None and 'wrench_norm_thresh' in list(params.keys()):
wrench_norm_thresh = params.wrench_norm_thresh
if params is not None and 'wrench_regularizer' in list(params.keys()):
wrench_regularizer = params.wrench_regularizer
# create grasp matrix
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals,
soft_fingers, params=params)
s = time.time()
# center grasp matrix for better convex hull comp
hull = cvh.ConvexHull(G.T)
# TODO: suppress ridiculous amount of output for perfectly valid input to qhull
e = time.time()
logging.debug('CVH took %.3f sec' % (e - s))
debug = False
if debug:
fig = plt.figure()
torques = G[3:, :].T
ax = Axes3D(fig)
ax.scatter(torques[:, 0], torques[:, 1], torques[:, 2], c='b', s=50)
ax.scatter(0, 0, 0, c='k', s=80)
ax.set_xlim3d(-1.5, 1.5)
ax.set_ylim3d(-1.5, 1.5)
ax.set_zlim3d(-1.5, 1.5)
ax.set_xlabel('tx')
ax.set_ylabel('ty')
ax.set_zlabel('tz')
plt.show()
if len(hull.vertices) == 0:
logging.warning('Convex hull could not be computed')
return 0.0
# determine whether or not zero is in the convex hull
s = time.time()
min_norm_in_hull, v = PointGraspMetrics3D.min_norm_vector_in_facet(G, wrench_regularizer=wrench_regularizer)
e = time.time()
logging.debug('Min norm took %.3f sec' % (e - s))
# print("shunang",min_norm_in_hull)
# if norm is greater than 0 then forces are outside of hull
if min_norm_in_hull > wrench_norm_thresh:
logging.debug('Zero not in convex hull')
return 0.0
# if there are fewer nonzeros than D-1 (dim of space minus one)
# then zero is on the boundary and therefore we do not have
# force closure
if np.sum(v > 1e-4) <= G.shape[0] - 1:
logging.warning('Zero not in interior of convex hull')
return 0.0
# find minimum norm vector across all facets of convex hull
s = time.time()
min_dist = sys.float_info.max
closest_facet = None
# print("shunang",G)
for v in hull.vertices:
if np.max(np.array(v)) < G.shape[1]: # because of some occasional odd behavior from pyhull
facet = G[:, v]
# print("shunang1",facet)
dist, _ = PointGraspMetrics3D.min_norm_vector_in_facet(facet, wrench_regularizer=wrench_regularizer)
if dist < min_dist:
min_dist = dist
closest_facet = v
e = time.time()
logging.debug('Min dist took %.3f sec for %d vertices' % (e - s, len(hull.vertices)))
return min_dist
@staticmethod
def ferrari_canny_L1_force_only(forces, torques, normals, soft_fingers=False, params=None,
wrench_norm_thresh=1e-3,
wrench_regularizer=1e-10):
""" Ferrari & Canny's L1 metric with force only. Also known as the epsilon metric.
Parameters
----------
forces : 3xN :obj:`numpy.ndarray`
set of forces on object in object basis
torques : 3xN :obj:`numpy.ndarray`
set of torques on object in object basis
normals : 3xN :obj:`numpy.ndarray`
surface normals at the contact points
soft_fingers : bool
whether or not to use the soft finger contact model
params : :obj:`GraspQualityConfig`
set of parameters for grasp matrix and contact model
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
Returns
-------
float : value of metric
"""
if params is not None and 'wrench_norm_thresh' in list(params.keys()):
wrench_norm_thresh = params.wrench_norm_thresh
if params is not None and 'wrench_regularizer' in list(params.keys()):
wrench_regularizer = params.wrench_regularizer
# create grasp matrix
G = PointGraspMetrics3D.grasp_matrix(forces, torques, normals,
soft_fingers, params=params)
G = G[:3, :]
s = time.time()
# center grasp matrix for better convex hull comp
hull = cvh.ConvexHull(G.T)
# TODO: suppress ridiculous amount of output for perfectly valid input to qhull
e = time.time()
logging.debug('CVH took %.3f sec' % (e - s))
debug = False
if debug:
fig = plt.figure()
torques = G[3:, :].T
ax = Axes3D(fig)
ax.scatter(torques[:, 0], torques[:, 1], torques[:, 2], c='b', s=50)
ax.scatter(0, 0, 0, c='k', s=80)
ax.set_xlim3d(-1.5, 1.5)
ax.set_ylim3d(-1.5, 1.5)
ax.set_zlim3d(-1.5, 1.5)
ax.set_xlabel('tx')
ax.set_ylabel('ty')
ax.set_zlabel('tz')
plt.show()
if len(hull.vertices) == 0:
logging.warning('Convex hull could not be computed')
return 0.0
# determine whether or not zero is in the convex hull
s = time.time()
min_norm_in_hull, v = PointGraspMetrics3D.min_norm_vector_in_facet(G, wrench_regularizer=wrench_regularizer)
e = time.time()
logging.debug('Min norm took %.3f sec' % (e - s))
# print("shunang",min_norm_in_hull)
# if norm is greater than 0 then forces are outside of hull
if min_norm_in_hull > wrench_norm_thresh:
logging.debug('Zero not in convex hull')
return 0.0
# if there are fewer nonzeros than D-1 (dim of space minus one)
# then zero is on the boundary and therefore we do not have
# force closure
if np.sum(v > 1e-4) <= G.shape[0] - 1:
logging.warning('Zero not in interior of convex hull')
return 0.0
# find minimum norm vector across all facets of convex hull
s = time.time()
min_dist = sys.float_info.max
closest_facet = None
# print("shunang",G)
for v in hull.vertices:
if np.max(np.array(v)) < G.shape[1]: # because of some occasional odd behavior from pyhull
facet = G[:, v]
# print("shunang1",facet)
dist, _ = PointGraspMetrics3D.min_norm_vector_in_facet(facet, wrench_regularizer=wrench_regularizer)
if dist < min_dist:
min_dist = dist
closest_facet = v
e = time.time()
logging.debug('Min dist took %.3f sec for %d vertices' % (e - s, len(hull.vertices)))
return min_dist
@staticmethod
def wrench_in_positive_span(wrench_basis, target_wrench, force_limit, num_fingers=1,
wrench_norm_thresh=1e-4, wrench_regularizer=1e-10):
""" Check whether a target can be exerted by positive combinations of wrenches in a given basis with L1 norm fonger force limit limit.
Parameters
----------
wrench_basis : 6xN :obj:`numpy.ndarray`
basis for the wrench space
target_wrench : 6x1 :obj:`numpy.ndarray`
target wrench to resist
force_limit : float
L1 upper bound on the forces per finger (aka contact point)
num_fingers : int
number of contacts, used to enforce L1 finger constraint
wrench_norm_thresh : float
threshold to use to determine equivalence of target wrenches
wrench_regularizer : float
small float to make quadratic program positive semidefinite
Returns
-------
int
whether or not wrench can be resisted
float
minimum norm of the finger forces required to resist the wrench
"""
num_wrenches = wrench_basis.shape[1]
# quadratic and linear costs
P = wrench_basis.T.dot(wrench_basis) + wrench_regularizer * np.eye(num_wrenches)
q = -wrench_basis.T.dot(target_wrench)
# inequalities
lam_geq_zero = -1 * np.eye(num_wrenches)
num_wrenches_per_finger = num_wrenches // num_fingers
force_constraint = np.zeros([num_fingers, num_wrenches])
for i in range(num_fingers):
start_i = num_wrenches_per_finger * i
end_i = num_wrenches_per_finger * (i + 1)
force_constraint[i, start_i:end_i] = np.ones(num_wrenches_per_finger)
G = np.r_[lam_geq_zero, force_constraint]
h = np.zeros(num_wrenches + num_fingers)
for i in range(num_fingers):
h[num_wrenches + i] = force_limit
# convert to cvx and solve
P = cvx.matrix(P)
q = cvx.matrix(q)
G = cvx.matrix(G)
h = cvx.matrix(h)
sol = cvx.solvers.qp(P, q, G, h)
v = np.array(sol['x'])
min_dist = np.linalg.norm(wrench_basis.dot(v).ravel() - target_wrench) ** 2
# add back in the target wrench
return min_dist < wrench_norm_thresh, np.linalg.norm(v)
@staticmethod
def min_norm_vector_in_facet(facet, wrench_regularizer=1e-10):
""" Finds the minimum norm point in the convex hull of a given facet (aka simplex) by solving a QP.
Parameters
----------
facet : 6xN :obj:`numpy.ndarray`
vectors forming the facet
wrench_regularizer : float
small float to make quadratic program positive semidefinite
Returns
-------
float
minimum norm of any point in the convex hull of the facet
Nx1 :obj:`numpy.ndarray`
vector of coefficients that achieves the minimum
"""
dim = facet.shape[1] # num vertices in facet
# create alpha weights for vertices of facet
G = facet.T.dot(facet)
grasp_matrix = G + wrench_regularizer * np.eye(G.shape[0])
# Solve QP to minimize .5 x'Px + q'x subject to Gx <= h, Ax = b
P = cvx.matrix(2 * grasp_matrix) # quadratic cost for Euclidean dist
q = cvx.matrix(np.zeros((dim, 1)))
G = cvx.matrix(-np.eye(dim)) # greater than zero constraint
h = cvx.matrix(np.zeros((dim, 1)))
A = cvx.matrix(np.ones((1, dim))) # sum constraint to enforce convex
b = cvx.matrix(np.ones(1)) # combinations of vertices
sol = cvx.solvers.qp(P, q, G, h, A, b)
v = np.array(sol['x'])
min_norm = np.sqrt(sol['primal objective'])
return abs(min_norm), v
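# Minimal usage sketch of the facet QP (illustrative values; relies on cvxopt, imported
# above as cvx). With two orthogonal unit force wrenches, the point of their convex hull
# closest to the origin is the midpoint of the segment between them, so the reported
# minimum norm is about sqrt(0.5) ~ 0.707.
if __name__ == '__main__':
    facet = np.zeros((6, 2))
    facet[0, 0] = 1.0   # unit wrench along the x force axis
    facet[1, 1] = 1.0   # unit wrench along the y force axis
    min_norm, coeffs = PointGraspMetrics3D.min_norm_vector_in_facet(facet)
    print(min_norm)        # ~0.707
    print(coeffs.ravel())  # ~[0.5, 0.5]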

View File

@@ -0,0 +1,389 @@
__author__ = 'cxwang, mhgou'
__version__ = '1.0'
import os
import time
import numpy as np
import open3d as o3d
from transforms3d.euler import euler2mat, quat2mat
from .rotation import batch_viewpoint_params_to_matrix, matrix_to_dexnet_params
from .dexnet.grasping.quality import PointGraspMetrics3D
from .dexnet.grasping.grasp import ParallelJawPtGrasp3D
from .dexnet.grasping.graspable_object import GraspableObject3D
from .dexnet.grasping.grasp_quality_config import GraspQualityConfigFactory
from .dexnet.grasping.contacts import Contact3D
from .dexnet.grasping.meshpy.obj_file import ObjFile
from .dexnet.grasping.meshpy.sdf_file import SdfFile
def get_scene_name(num):
'''
**Input:**
- num: int of the scene number.
**Output:**
- string of the scene name.
'''
return ('scene_%04d' % (num,))
def create_table_points(lx, ly, lz, dx=0, dy=0, dz=0, grid_size=0.01):
'''
**Input:**
- lx: float, length of the table along the x axis.
- ly: float, length of the table along the y axis.
- lz: float, length of the table along the z axis.
- dx/dy/dz: float, offsets added to the sampled coordinates.
- grid_size: float, sampling interval of the grid.
**Output:**
- numpy array of the points with shape (-1, 3).
'''
xmap = np.linspace(0, lx, int(lx/grid_size))
ymap = np.linspace(0, ly, int(ly/grid_size))
zmap = np.linspace(0, lz, int(lz/grid_size))
xmap, ymap, zmap = np.meshgrid(xmap, ymap, zmap, indexing='xy')
xmap += dx
ymap += dy
zmap += dz
points = np.stack([xmap, ymap, zmap], axis=-1)
points = points.reshape([-1, 3])
return points
def parse_posevector(posevector):
'''
**Input:**
- posevector: list of pose
**Output:**
- obj_idx: int of the index of object.
- mat: numpy array of shape (4, 4) of the 6D pose of object.
'''
mat = np.zeros([4,4],dtype=np.float32)
alpha, beta, gamma = posevector[4:7]
alpha = alpha / 180.0 * np.pi
beta = beta / 180.0 * np.pi
gamma = gamma / 180.0 * np.pi
mat[:3,:3] = euler2mat(alpha, beta, gamma)
mat[:3,3] = posevector[1:4]
mat[3,3] = 1
obj_idx = int(posevector[0])
return obj_idx, mat
def load_dexnet_model(data_path):
'''
**Input:**
- data_path: path to load .obj & .sdf files
**Output:**
- obj: dexnet model
'''
of = ObjFile('{}.obj'.format(data_path))
sf = SdfFile('{}.sdf'.format(data_path))
mesh = of.read()
sdf = sf.read()
obj = GraspableObject3D(sdf, mesh)
return obj
def transform_points(points, trans):
'''
**Input:**
- points: (N, 3)
- trans: (4, 4)
**Output:**
- points_trans: (N, 3)
'''
ones = np.ones([points.shape[0],1], dtype=points.dtype)
points_ = np.concatenate([points, ones], axis=-1)
points_ = np.matmul(trans, points_.T).T
points_trans = points_[:,:3]
return points_trans
def compute_point_distance(A, B):
'''
**Input:**
- A: (N, 3)
- B: (M, 3)
**Output:**
- dists: (N, M)
'''
A = A[:, np.newaxis, :]
B = B[np.newaxis, :, :]
dists = np.linalg.norm(A-B, axis=-1)
return dists
def compute_closest_points(A, B):
'''
**Input:**
- A: (N, 3)
- B: (M, 3)
**Output:**
- indices: (N,) closest point index in B for each point in A
'''
dists = compute_point_distance(A, B)
indices = np.argmin(dists, axis=-1)
return indices
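# Minimal usage sketch (illustrative values): transform_points applies a homogeneous
# 4x4 transform, and compute_closest_points returns, for each point in A, the index of
# its nearest neighbour in B.
#
#   pts = np.array([[0., 0., 0.], [1., 0., 0.]])
#   trans = np.eye(4); trans[:3, 3] = [0., 0., 0.5]   # pure translation along z
#   moved = transform_points(pts, trans)              # -> [[0, 0, 0.5], [1, 0, 0.5]]
#   compute_closest_points(moved, pts)                # -> array([0, 1])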
def voxel_sample_points(points, voxel_size=0.008):
'''
**Input:**
- points: (N, 3)
**Output:**
- points: (n, 3)
'''
cloud = o3d.geometry.PointCloud()
cloud.points = o3d.utility.Vector3dVector(points)
cloud = cloud.voxel_down_sample(voxel_size)
points = np.array(cloud.points)
return points
def topk_grasps(grasps, k=10):
'''
**Input:**
- grasps: (N, 17)
- k: int
**Output:**
- topk_grasps: (k, 17)
'''
assert(k > 0)
grasp_confidence = grasps[:, 0]
indices = np.argsort(-grasp_confidence)
topk_indices = indices[:min(k, len(grasps))]
topk_grasps = grasps[topk_indices]
return topk_grasps
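# Minimal usage sketch (illustrative values): topk_grasps keeps the k rows of an
# (N, 17) grasp array with the highest confidence (column 0), sorted in descending order.
#
#   grasps = np.zeros((100, 17)); grasps[:, 0] = np.random.rand(100)
#   best = topk_grasps(grasps, k=10)   # shape (10, 17)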
def get_grasp_score(grasp, obj, fc_list, force_closure_quality_config):
tmp, is_force_closure = False, False
quality = -1
for ind_, value_fc in enumerate(fc_list):
value_fc = round(value_fc, 2)
tmp = is_force_closure
is_force_closure = PointGraspMetrics3D.grasp_quality(grasp, obj, force_closure_quality_config[value_fc])
if tmp and not is_force_closure:
quality = round(fc_list[ind_ - 1], 2)
break
elif is_force_closure and value_fc == fc_list[-1]:
quality = value_fc
break
elif value_fc == fc_list[0] and not is_force_closure:
break
return quality
def collision_detection(grasp_list, model_list, dexnet_models, poses, scene_points, outlier=0.05, empty_thresh=10, return_dexgrasps=False):
'''
**Input:**
- grasp_list: [(k1, 17), (k2, 17), ..., (kn, 17)] in camera coordinate
- model_list: [(N1, 3), (N2, 3), ..., (Nn, 3)] in camera coordinate
- dexnet_models: [GraspableObject3D,] in object coordinate
- poses: [(4, 4),] from model coordinate to camera coordinate
- scene_points: (Ns, 3) in camera coordinate
- outlier: float, used to compute workspace mask
- empty_thresh: int, 'num_inner_points < empty_thresh' means empty grasp
- return_dexgrasps: bool, return grasps in dex-net format while True
**Output:**
- collision_mask_list: [(k1,), (k2,), ..., (kn,)]
- empty_mask_list: [(k1,), (k2,), ..., (kn,)]
- dexgrasp_list: [[ParallelJawPtGrasp3D,],] in object coordinate
'''
height = 0.02
depth_base = 0.02
finger_width = 0.01
collision_mask_list = list()
num_models = len(model_list)
empty_mask_list = list()
dexgrasp_list = list()
for i in range(num_models):
if len(grasp_list[i]) == 0:
collision_mask_list.append(list())
empty_mask_list.append(list())
if return_dexgrasps:
dexgrasp_list.append(list())
continue
## parse grasp parameters
model = model_list[i]
obj_pose = poses[i]
dexnet_model = dexnet_models[i]
grasps = grasp_list[i]
grasp_points = grasps[:, 13:16]
grasp_poses = grasps[:, 4:13].reshape([-1,3,3])
grasp_depths = grasps[:, 3]
grasp_widths = grasps[:, 1]
## crop scene, remove outlier
xmin, xmax = model[:,0].min(), model[:,0].max()
ymin, ymax = model[:,1].min(), model[:,1].max()
zmin, zmax = model[:,2].min(), model[:,2].max()
xlim = ((scene_points[:,0] > xmin-outlier) & (scene_points[:,0] < xmax+outlier))
ylim = ((scene_points[:,1] > ymin-outlier) & (scene_points[:,1] < ymax+outlier))
zlim = ((scene_points[:,2] > zmin-outlier) & (scene_points[:,2] < zmax+outlier))
workspace = scene_points[xlim & ylim & zlim]
# transform scene to gripper frame
target = (workspace[np.newaxis,:,:] - grasp_points[:,np.newaxis,:])
target = np.matmul(target, grasp_poses)
# compute collision mask
mask1 = ((target[:,:,2]>-height/2) & (target[:,:,2]<height/2))
mask2 = ((target[:,:,0]>-depth_base) & (target[:,:,0]<grasp_depths[:,np.newaxis]))
mask3 = (target[:,:,1]>-(grasp_widths[:,np.newaxis]/2+finger_width))
mask4 = (target[:,:,1]<-grasp_widths[:,np.newaxis]/2)
mask5 = (target[:,:,1]<(grasp_widths[:,np.newaxis]/2+finger_width))
mask6 = (target[:,:,1]>grasp_widths[:,np.newaxis]/2)
mask7 = ((target[:,:,0]>-(depth_base+finger_width)) & (target[:,:,0]<-depth_base))
left_mask = (mask1 & mask2 & mask3 & mask4)
right_mask = (mask1 & mask2 & mask5 & mask6)
bottom_mask = (mask1 & mask3 & mask5 & mask7)
inner_mask = (mask1 & mask2 &(~mask4) & (~mask6))
collision_mask = np.any((left_mask | right_mask | bottom_mask), axis=-1)
empty_mask = (np.sum(inner_mask, axis=-1) < empty_thresh)
collision_mask = (collision_mask | empty_mask)
collision_mask_list.append(collision_mask)
empty_mask_list.append(empty_mask)
## generate grasps in dex-net format
if return_dexgrasps:
dexgrasps = list()
for grasp_id,_ in enumerate(grasps):
grasp_point = grasp_points[grasp_id]
R = grasp_poses[grasp_id]
width = grasp_widths[grasp_id]
depth = grasp_depths[grasp_id]
points_in_gripper = target[grasp_id][inner_mask[grasp_id]]
if empty_mask[grasp_id]:
dexgrasps.append(None)
continue
center = np.array([depth, 0, 0]).reshape([3, 1]) # gripper coordinate
center = np.dot(grasp_poses[grasp_id], center).reshape([3])
center = (center + grasp_point).reshape([1,3]) # camera coordinate
center = transform_points(center, np.linalg.inv(obj_pose)).reshape([3]) # object coordinate
R = np.dot(obj_pose[:3,:3].T, R)
binormal, approach_angle = matrix_to_dexnet_params(R)
grasp = ParallelJawPtGrasp3D(ParallelJawPtGrasp3D.configuration_from_params(
center, binormal, width, approach_angle), depth)
dexgrasps.append(grasp)
dexgrasp_list.append(dexgrasps)
if return_dexgrasps:
return collision_mask_list, empty_mask_list, dexgrasp_list
else:
return collision_mask_list, empty_mask_list
def eval_grasp(grasp_group, models, dexnet_models, poses, config, table=None, voxel_size=0.008, TOP_K = 50):
'''
**Input:**
- grasp_group: GraspGroup instance for evaluation.
- models: in model coordinate
- dexnet_models: models in dexnet format
- poses: from model to camera coordinate
- config: dexnet config.
- table: in camera coordinate
- voxel_size: float of the voxel size.
- TOP_K: int of the number of top grasps to evaluate.
**Output:**
- grasp_list: list of (k_i, 17) numpy arrays of the grasps assigned to each object.
- score_list: list of (k_i,) numpy arrays of grasp scores (-1 for colliding or empty grasps).
- collision_mask_list: list of (k_i,) boolean numpy arrays of collision masks.
'''
num_models = len(models)
## grasp nms
grasp_group = grasp_group.nms(0.03, 30.0/180*np.pi)
## assign grasps to object
# merge and sample scene
model_trans_list = list()
seg_mask = list()
for i,model in enumerate(models):
model_trans = transform_points(model, poses[i])
seg = i * np.ones(model_trans.shape[0], dtype=np.int32)
model_trans_list.append(model_trans)
seg_mask.append(seg)
seg_mask = np.concatenate(seg_mask, axis=0)
scene = np.concatenate(model_trans_list, axis=0)
# assign grasps
indices = compute_closest_points(grasp_group.translations, scene)
model_to_grasp = seg_mask[indices]
pre_grasp_list = list()
for i in range(num_models):
grasp_i = grasp_group[model_to_grasp==i]
grasp_i.sort_by_score()
pre_grasp_list.append(grasp_i[:10].grasp_group_array)
all_grasp_list = np.vstack(pre_grasp_list)
remain_mask = np.argsort(all_grasp_list[:,0])[::-1]
min_score = all_grasp_list[remain_mask[min(49,len(remain_mask) - 1)],0]
grasp_list = []
for i in range(num_models):
remain_mask_i = pre_grasp_list[i][:,0] >= min_score
grasp_list.append(pre_grasp_list[i][remain_mask_i])
# grasp_list = pre_grasp_list
## collision detection
if table is not None:
scene = np.concatenate([scene, table])
collision_mask_list, empty_list, dexgrasp_list = collision_detection(
grasp_list, model_trans_list, dexnet_models, poses, scene, outlier=0.05, return_dexgrasps=True)
## evaluate grasps
# score configurations
force_closure_quality_config = dict()
fc_list = np.array([1.2, 1.0, 0.8, 0.6, 0.4, 0.2])
for value_fc in fc_list:
value_fc = round(value_fc, 2)
config['metrics']['force_closure']['friction_coef'] = value_fc
force_closure_quality_config[value_fc] = GraspQualityConfigFactory.create_config(config['metrics']['force_closure'])
# get grasp scores
score_list = list()
for i in range(num_models):
dexnet_model = dexnet_models[i]
collision_mask = collision_mask_list[i]
dexgrasps = dexgrasp_list[i]
scores = list()
num_grasps = len(dexgrasps)
for grasp_id in range(num_grasps):
if collision_mask[grasp_id]:
scores.append(-1.)
continue
if dexgrasps[grasp_id] is None:
scores.append(-1.)
continue
grasp = dexgrasps[grasp_id]
score = get_grasp_score(grasp, dexnet_model, fc_list, force_closure_quality_config)
scores.append(score)
score_list.append(np.array(scores))
return grasp_list, score_list, collision_mask_list

View File

@@ -0,0 +1,94 @@
__author__ = 'Minghao Gou'
__version__ = '1.0'
"""
define the pose class and functions associated with this class.
"""
import numpy as np
from . import trans3d
from transforms3d.euler import euler2quat
class Pose:
def __init__(self,id,x,y,z,alpha,beta,gamma):
self.id = id
self.x = x
self.y = y
self.z = z
# alpha, beta, gamma are in degrees
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self.quat = self.get_quat()
self.mat_4x4 = self.get_mat_4x4()
self.translation = self.get_translation()
def __repr__(self):
return '\nPose id=%d,x=%f,y=%f,z=%f,alpha=%f,beta=%f,gamma=%f' %(self.id,self.x,self.y,self.z,self.alpha,self.beta,self.gamma)+'\n'+'translation:'+self.translation.__repr__() + '\nquat:'+self.quat.__repr__()+'\nmat_4x4:'+self.mat_4x4.__repr__()
def get_id(self):
"""
**Output:**
- return the id of this object
"""
return self.id
def get_translation(self):
"""
**Output:**
- Convert self.x, self.y, self.z into self.translation
"""
return np.array([self.x,self.y,self.z])
def get_quat(self):
"""
**Output:**
- Convert self.alpha, self.beta, self.gamma into self.quat
"""
euler = np.array([self.alpha, self.beta, self.gamma]) / 180.0 * np.pi
quat = euler2quat(euler[0],euler[1],euler[2])
return quat
def get_mat_4x4(self):
"""
**Output:**
- Convert self.x, self.y, self.z, self.alpha, self.beta and self.gamma into mat_4x4 pose
"""
mat_4x4 = trans3d.get_mat(self.x,self.y,self.z,self.alpha,self.beta,self.gamma)
return mat_4x4
def pose_from_pose_vector(pose_vector):
"""
**Input:**
- pose_vector: A list in the format of [id,x,y,z,alpha,beta,gamma]
**Output:**
- A pose class instance
"""
return Pose(id = pose_vector[0],
x = pose_vector[1],
y = pose_vector[2],
z = pose_vector[3],
alpha = pose_vector[4],
beta = pose_vector[5],
gamma = pose_vector[6])
def pose_list_from_pose_vector_list(pose_vector_list):
"""
**Input:**
- Pose vector list defined in xmlhandler.py
**Output:**
- list of poses.
"""
pose_list = []
for pose_vector in pose_vector_list:
pose_list.append(pose_from_pose_vector(pose_vector))
return pose_list
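# Minimal usage sketch (illustrative values): a pose vector follows the
# [id, x, y, z, alpha, beta, gamma] layout used by xmlhandler.py, with angles in degrees.
if __name__ == '__main__':
    pose = pose_from_pose_vector([3, 0.1, 0.2, 0.3, 0.0, 0.0, 90.0])
    print(pose.get_id())            # 3
    print(pose.get_translation())   # [0.1 0.2 0.3]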

View File

@@ -0,0 +1,142 @@
""" Author: chenxi-wang
Transformation matrices from/to viewpoints and dexnet gripper params.
"""
import numpy as np
from math import pi
def rotation_matrix(alpha, beta, gamma):
'''
**Input:**
- alpha: float of alpha angle.
- beta: float of beta angle.
- gamma: float of the gamma angle.
**Output:**
- numpy array of shape (3, 3) of rotation matrix.
'''
Rx = np.array([[1, 0, 0],
[0, np.cos(alpha), -np.sin(alpha)],
[0, np.sin(alpha), np.cos(alpha)]])
Ry = np.array([[np.cos(beta), 0, np.sin(beta)],
[0, 1, 0],
[-np.sin(beta), 0, np.cos(beta)]])
Rz = np.array([[np.cos(gamma), -np.sin(gamma), 0],
[np.sin(gamma), np.cos(gamma), 0],
[0, 0, 1]])
R = Rz.dot(Ry).dot(Rx)
return R
def matrix_to_dexnet_params(matrix):
'''
**Input:**
- numpy array of shape (3, 3) of the rotation matrix.
**Output:**
- binormal: numpy array of shape (3,).
- angle: float of the angle.
'''
approach = matrix[:, 0]
binormal = matrix[:, 1]
axis_y = binormal
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R = np.c_[axis_x, np.c_[axis_y, axis_z]]
approach = R.T.dot(approach)
cos_t, sin_t = approach[0], -approach[2]
angle = np.arccos(max(min(cos_t,1),-1))
if sin_t < 0:
angle = pi * 2 - angle
return binormal, angle
def viewpoint_params_to_matrix(towards, angle):
'''
**Input:**
- towards: numpy array towards vector with shape (3,).
- angle: float of in-plane rotation.
**Output:**
- numpy array of the rotation matrix with shape (3, 3).
'''
axis_x = towards
axis_y = np.array([-axis_x[1], axis_x[0], 0])
if np.linalg.norm(axis_y) == 0:
axis_y = np.array([0, 1, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R1 = np.array([[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
matrix = R2.dot(R1)
return matrix.astype(np.float32)
def batch_viewpoint_params_to_matrix(batch_towards, batch_angle):
'''
**Input:**
- towards: numpy array towards vectors with shape (n, 3).
- angle: numpy array of in-plane rotations (n, ).
**Output:**
- numpy array of the rotation matrix with shape (n, 3, 3).
'''
axis_x = batch_towards
ones = np.ones(axis_x.shape[0], dtype=axis_x.dtype)
zeros = np.zeros(axis_x.shape[0], dtype=axis_x.dtype)
axis_y = np.stack([-axis_x[:,1], axis_x[:,0], zeros], axis=-1)
mask_y = (np.linalg.norm(axis_y, axis=-1) == 0)
axis_y[mask_y] = np.array([0, 1, 0])
axis_x = axis_x / np.linalg.norm(axis_x, axis=-1, keepdims=True)
axis_y = axis_y / np.linalg.norm(axis_y, axis=-1, keepdims=True)
axis_z = np.cross(axis_x, axis_y)
sin = np.sin(batch_angle)
cos = np.cos(batch_angle)
R1 = np.stack([ones, zeros, zeros, zeros, cos, -sin, zeros, sin, cos], axis=-1)
R1 = R1.reshape([-1,3,3])
R2 = np.stack([axis_x, axis_y, axis_z], axis=-1)
matrix = np.matmul(R2, R1)
return matrix.astype(np.float32)
def dexnet_params_to_matrix(binormal, angle):
'''
**Input:**
- binormal: numpy array of shape (3,).
- angle: float of the angle.
**Output:**
- numpy array of shape (3, 3) of the rotation matrix.
'''
axis_y = binormal
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R1 = np.array([[np.cos(angle), 0, np.sin(angle)],
[0, 1, 0],
[-np.sin(angle), 0, np.cos(angle)]])
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
matrix = R2.dot(R1)
return matrix
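# Minimal usage sketch (illustrative values): build a gripper rotation from an approach
# direction and an in-plane angle, then express it in the dex-net (binormal, angle)
# parameterization used elsewhere in this package.
if __name__ == '__main__':
    towards = np.array([0., 0., -1.])   # approach pointing along -z
    R = viewpoint_params_to_matrix(towards, np.pi / 6)
    binormal, angle = matrix_to_dexnet_params(R)
    print(R)
    print(binormal, angle)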

View File

@@ -0,0 +1,62 @@
from transforms3d.quaternions import mat2quat, quat2mat
from transforms3d.euler import quat2euler, euler2quat
import numpy as np
def get_pose(pose):
pos, quat = pose_4x4_to_pos_quat(pose)
euler = np.array([quat2euler(quat)[0], quat2euler(quat)[1],quat2euler(quat)[2]])
euler = euler * 180.0 / np.pi
alpha, beta, gamma = euler[0], euler[1], euler[2]
x, y, z = pos[0], pos[1], pos[2]
return x,y,z, alpha, beta, gamma
def get_mat(x,y,z, alpha, beta, gamma):
"""
Convert x, y, z and alpha, beta, gamma (Euler angles in degrees) into a 4x4 pose matrix.
"""
try:
euler = np.array([alpha, beta, gamma]) / 180.0 * np.pi
quat = np.array(euler2quat(euler[0],euler[1],euler[2]))
pose = pos_quat_to_pose_4x4(np.array([x,y,z]), quat)
return pose
except Exception as e:
print(str(e))
pass
def pos_quat_to_pose_4x4(pos, quat):
"""pose = pos_quat_to_pose_4x4(pos, quat)
Convert pos and quat into pose, 4x4 format
Args:
pos: length-3 position
quat: length-4 quaternion
Returns:
pose: numpy array, 4x4
"""
pose = np.zeros([4, 4])
mat = quat2mat(quat)
pose[0:3, 0:3] = mat[:, :]
pose[0:3, -1] = pos[:]
pose[-1, -1] = 1
return pose
def pose_4x4_to_pos_quat(pose):
"""
Convert pose, 4x4 format into pos and quat
Args:
pose: numpy array, 4x4
Returns:
pos: length-3 position
quat: length-4 quaternion
"""
mat = pose[:3, :3]
quat = mat2quat(mat)
pos = np.zeros([3])
pos[0] = pose[0, 3]
pos[1] = pose[1, 3]
pos[2] = pose[2, 3]
return pos, quat
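# Minimal usage sketch (illustrative values): pos_quat_to_pose_4x4 and
# pose_4x4_to_pos_quat invert each other (up to quaternion sign).
if __name__ == '__main__':
    pos = np.array([0.1, 0.2, 0.3])
    quat = euler2quat(0.0, 0.0, np.pi / 2)   # 90 degree rotation about z (w, x, y, z order)
    pose = pos_quat_to_pose_4x4(pos, quat)
    pos_back, quat_back = pose_4x4_to_pos_quat(pose)
    print(pos_back, quat_back)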

View File

@@ -0,0 +1,807 @@
import os
import open3d as o3d
import numpy as np
from PIL import Image
from transforms3d.euler import euler2mat
from .rotation import batch_viewpoint_params_to_matrix
from .xmlhandler import xmlReader
class CameraInfo():
''' Author: chenxi-wang
Camera intrinsics for point cloud generation.
'''
def __init__(self, width, height, fx, fy, cx, cy, scale):
self.width = width
self.height = height
self.fx = fx
self.fy = fy
self.cx = cx
self.cy = cy
self.scale = scale
def get_camera_intrinsic(camera):
'''
**Input:**
- camera: string of type of camera, "realsense" or "kinect".
**Output:**
- numpy array of shape (3, 3) of the camera intrinsic matrix.
'''
param = o3d.camera.PinholeCameraParameters()
if camera == 'kinect':
param.intrinsic.set_intrinsics(1280,720,631.55,631.21,638.43,366.50)
elif camera == 'realsense':
param.intrinsic.set_intrinsics(1280,720,927.17,927.37,651.32,349.62)
intrinsic = param.intrinsic.intrinsic_matrix
return intrinsic
def create_point_cloud_from_depth_image(depth, camera, organized=True):
assert(depth.shape[0] == camera.height and depth.shape[1] == camera.width)
xmap = np.arange(camera.width)
ymap = np.arange(camera.height)
xmap, ymap = np.meshgrid(xmap, ymap)
points_z = depth / camera.scale
points_x = (xmap - camera.cx) * points_z / camera.fx
points_y = (ymap - camera.cy) * points_z / camera.fy
cloud = np.stack([points_x, points_y, points_z], axis=-1)
if not organized:
cloud = cloud.reshape([-1, 3])
return cloud
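# Minimal usage sketch (the intrinsics below are made up for illustration, not taken
# from the dataset): back-project a constant depth image into an organized point cloud.
#
#   camera = CameraInfo(width=4, height=3, fx=2.0, fy=2.0, cx=2.0, cy=1.5, scale=1000.0)
#   depth = np.full((3, 4), 500, dtype=np.uint16)        # a flat plane 0.5 m away
#   cloud = create_point_cloud_from_depth_image(depth, camera)
#   # cloud.shape == (3, 4, 3) and cloud[..., 2] == 0.5 everywhere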
def generate_views(N, phi=(np.sqrt(5)-1)/2, center=np.zeros(3, dtype=np.float32), R=1):
''' Author: chenxi-wang
View sampling on a sphere using Fibonacci lattices.
**Input:**
- N: int, number of viewpoints.
- phi: float, golden-ratio constant used to space the views, usually 0.618.
- center: numpy array of (3,), sphere center.
- R: float, sphere radius.
**Output:**
- numpy array of (N, 3), coordinates of viewpoints.
'''
idxs = np.arange(N, dtype=np.float32)
Z = (2 * idxs + 1) / N - 1
X = np.sqrt(1 - Z**2) * np.cos(2 * idxs * np.pi * phi)
Y = np.sqrt(1 - Z**2) * np.sin(2 * idxs * np.pi * phi)
views = np.stack([X,Y,Z], axis=1)
views = R * np.array(views) + center
return views
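# Minimal usage sketch (illustrative values): sample 300 approach viewpoints on a unit
# sphere centred at the origin; every returned row is a 3D point at distance R from center.
#
#   views = generate_views(300)     # shape (300, 3)
#   np.linalg.norm(views, axis=1)   # ~1.0 everywhere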
def generate_scene_model(dataset_root, scene_name, anno_idx, return_poses=False, align=False, camera='realsense'):
'''
Author: chenxi-wang
**Input:**
- dataset_root: str, graspnet dataset root
- scene_name: str, name of scene folder, e.g. scene_0000
- anno_idx: int, frame index from 0-255
- return_poses: bool, return object ids and 6D poses if set to True
- align: bool, transform to table coordinates if set to True
- camera: str, camera name (realsense or kinect)
**Output:**
- list of open3d.geometry.PointCloud.
'''
if align:
camera_poses = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'camera_poses.npy'))
camera_pose = camera_poses[anno_idx]
align_mat = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'cam0_wrt_table.npy'))
camera_pose = np.matmul(align_mat,camera_pose)
print('Scene {}, {}'.format(scene_name, camera))
scene_reader = xmlReader(os.path.join(dataset_root, 'scenes', scene_name, camera, 'annotations', '%04d.xml'%anno_idx))
posevectors = scene_reader.getposevectorlist()
obj_list = []
mat_list = []
model_list = []
pose_list = []
for posevector in posevectors:
obj_idx, pose = parse_posevector(posevector)
obj_list.append(obj_idx)
mat_list.append(pose)
for obj_idx, pose in zip(obj_list, mat_list):
plyfile = os.path.join(dataset_root, 'models', '%03d'%obj_idx, 'nontextured.ply')
model = o3d.io.read_point_cloud(plyfile)
points = np.array(model.points)
if align:
pose = np.dot(camera_pose, pose)
points = transform_points(points, pose)
model.points = o3d.utility.Vector3dVector(points)
model_list.append(model)
pose_list.append(pose)
if return_poses:
return model_list, obj_list, pose_list
else:
return model_list
def generate_scene_pointcloud(dataset_root, scene_name, anno_idx, align=False, camera='kinect'):
'''
Author: chenxi-wang
**Input:**
- dataset_root: str, graspnet dataset root
- scene_name: str, name of scene folder, e.g. scene_0000
- anno_idx: int, frame index from 0-255
- align: bool, transform to table coordinates if set to True
- camera: str, camera name (realsense or kinect)
**Output:**
- open3d.geometry.PointCloud.
'''
colors = np.array(Image.open(os.path.join(dataset_root, 'scenes', scene_name, camera, 'rgb', '%04d.png'%anno_idx)), dtype=np.float32) / 255.0
depths = np.array(Image.open(os.path.join(dataset_root, 'scenes', scene_name, camera, 'depth', '%04d.png'%anno_idx)))
intrinsics = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'camK.npy'))
fx, fy = intrinsics[0,0], intrinsics[1,1]
cx, cy = intrinsics[0,2], intrinsics[1,2]
s = 1000.0
if align:
camera_poses = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'camera_poses.npy'))
camera_pose = camera_poses[anno_idx]
align_mat = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'cam0_wrt_table.npy'))
camera_pose = align_mat.dot(camera_pose)
xmap, ymap = np.arange(colors.shape[1]), np.arange(colors.shape[0])
xmap, ymap = np.meshgrid(xmap, ymap)
points_z = depths / s
points_x = (xmap - cx) / fx * points_z
points_y = (ymap - cy) / fy * points_z
mask = (points_z > 0)
points = np.stack([points_x, points_y, points_z], axis=-1)
points = points[mask]
colors = colors[mask]
if align:
points = transform_points(points, camera_pose)
cloud = o3d.geometry.PointCloud()
cloud.points = o3d.utility.Vector3dVector(points)
cloud.colors = o3d.utility.Vector3dVector(colors)
return cloud
def rotation_matrix(rx, ry, rz):
'''
Author: chenxi-wang
**Input:**
- rx/ry/rz: float, rotation angle along x/y/z-axis
**Output:**
- numpy array of (3, 3), rotation matrix.
'''
Rx = np.array([[1, 0, 0],
[0, np.cos(rx), -np.sin(rx)],
[0, np.sin(rx), np.cos(rx)]])
Ry = np.array([[ np.cos(ry), 0, np.sin(ry)],
[ 0, 1, 0],
[-np.sin(ry), 0, np.cos(ry)]])
Rz = np.array([[np.cos(rz), -np.sin(rz), 0],
[np.sin(rz), np.cos(rz), 0],
[ 0, 0, 1]])
R = Rz.dot(Ry).dot(Rx)
return R
def transform_matrix(tx, ty, tz, rx, ry, rz):
'''
Author: chenxi-wang
**Input:**
- tx/ty/tz: float, translation along x/y/z-axis
- rx/ry/rz: float, rotation angle along x/y/z-axis
**Output:**
- numpy array of (4, 4), transformation matrix.
'''
trans = np.eye(4)
trans[:3,3] = np.array([tx, ty, tz])
rot_x = np.array([[1, 0, 0],
[0, np.cos(rx), -np.sin(rx)],
[0, np.sin(rx), np.cos(rx)]])
rot_y = np.array([[ np.cos(ry), 0, np.sin(ry)],
[ 0, 1, 0],
[-np.sin(ry), 0, np.cos(ry)]])
rot_z = np.array([[np.cos(rz), -np.sin(rz), 0],
[np.sin(rz), np.cos(rz), 0],
[ 0, 0, 1]])
trans[:3,:3] = rot_x.dot(rot_y).dot(rot_z)
return trans
def matrix_to_dexnet_params(matrix):
'''
Author: chenxi-wang
**Input:**
- numpy array of shape (3, 3) of the rotation matrix.
**Output:**
- binormal: numpy array of shape (3,).
- angle: float of the angle.
'''
approach = matrix[:, 0]
binormal = matrix[:, 1]
axis_y = binormal
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R = np.c_[axis_x, np.c_[axis_y, axis_z]]
approach = R.T.dot(approach)
cos_t, sin_t = approach[0], -approach[2]
angle = np.arccos(cos_t)
if sin_t < 0:
angle = np.pi * 2 - angle
return binormal, angle
def viewpoint_params_to_matrix(towards, angle):
'''
Author: chenxi-wang
**Input:**
- towards: numpy array towards vector with shape (3,).
- angle: float of in-plane rotation.
**Output:**
- numpy array of the rotation matrix with shape (3, 3).
'''
axis_x = towards
axis_y = np.array([-axis_x[1], axis_x[0], 0])
if np.linalg.norm(axis_y) == 0:
axis_y = np.array([0, 1, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R1 = np.array([[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)]])
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
matrix = R2.dot(R1)
return matrix
def dexnet_params_to_matrix(binormal, angle):
'''
Author: chenxi-wang
**Input:**
- binormal: numpy array of shape (3,).
- angle: float of the angle.
**Output:**
- numpy array of shape (3, 3) of the rotation matrix.
'''
axis_y = binormal
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R1 = np.array([[np.cos(angle), 0, np.sin(angle)],
[0, 1, 0],
[-np.sin(angle), 0, np.cos(angle)]])
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
matrix = R2.dot(R1)
return matrix
def transform_points(points, trans):
'''
Author: chenxi-wang
**Input:**
- points: numpy array of (N,3), point cloud
- trans: numpy array of (4,4), transformation matrix
**Output:**
- numpy array of (N,3), transformed points.
'''
ones = np.ones([points.shape[0],1], dtype=points.dtype)
points_ = np.concatenate([points, ones], axis=-1)
points_ = np.matmul(trans, points_.T).T
return points_[:,:3]
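# Illustrative usage sketch (not part of the original API): build a rigid
# transform with transform_matrix and apply it to a few made-up points.
def _example_transform_points():
    trans = transform_matrix(0.1, 0.0, 0.2, 0.0, 0.0, np.pi / 2)
    points = np.array([[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0]])
    return transform_points(points, trans)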
def get_model_grasps(datapath):
''' Author: chenxi-wang
Load grasp labels from .npz files.
'''
label = np.load(datapath)
points = label['points']
offsets = label['offsets']
scores = label['scores']
collision = label['collision']
return points, offsets, scores, collision
def parse_posevector(posevector):
''' Author: chenxi-wang
Decode posevector to object id and transformation matrix.
'''
mat = np.zeros([4,4],dtype=np.float32)
alpha, beta, gamma = posevector[4:7]
alpha = alpha / 180.0 * np.pi
beta = beta / 180.0 * np.pi
gamma = gamma / 180.0 * np.pi
mat[:3,:3] = euler2mat(alpha, beta, gamma)
mat[:3,3] = posevector[1:4]
mat[3,3] = 1
obj_idx = int(posevector[0])
return obj_idx, mat
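# Illustrative usage sketch (not part of the original API): decode a
# hand-written pose vector [obj_id, x, y, z, alpha, beta, gamma] (angles in
# degrees) into an object id and a 4x4 pose matrix. The values are made up.
def _example_parse_posevector():
    posevector = [3, 0.1, 0.0, 0.4, 0.0, 0.0, 90.0]
    obj_idx, mat = parse_posevector(posevector)
    return obj_idx, mat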
def create_mesh_box(width, height, depth, dx=0, dy=0, dz=0):
''' Author: chenxi-wang
Create box instance with mesh representation.
'''
box = o3d.geometry.TriangleMesh()
vertices = np.array([[0,0,0],
[width,0,0],
[0,0,depth],
[width,0,depth],
[0,height,0],
[width,height,0],
[0,height,depth],
[width,height,depth]])
vertices[:,0] += dx
vertices[:,1] += dy
vertices[:,2] += dz
triangles = np.array([[4,7,5],[4,6,7],[0,2,4],[2,6,4],
[0,1,2],[1,3,2],[1,5,7],[1,7,3],
[2,3,7],[2,7,6],[0,4,1],[1,4,5]])
box.vertices = o3d.utility.Vector3dVector(vertices)
box.triangles = o3d.utility.Vector3iVector(triangles)
return box
def create_table_cloud(width, height, depth, dx=0, dy=0, dz=0, grid_size=0.01):
'''
Author: chenxi-wang
**Input:**
- width/height/depth: float, table width/height/depth along x/z/y-axis in meters
- dx/dy/dz: float, offset along x/y/z-axis in meters
- grid_size: float, point distance along x/y/z-axis in meters
**Output:**
- open3d.geometry.PointCloud
'''
xmap = np.linspace(0, width, int(width/grid_size))
ymap = np.linspace(0, depth, int(depth/grid_size))
zmap = np.linspace(0, height, int(height/grid_size))
xmap, ymap, zmap = np.meshgrid(xmap, ymap, zmap, indexing='xy')
xmap += dx
ymap += dy
zmap += dz
points = np.stack([xmap, ymap, zmap], axis=-1)
points = points.reshape([-1, 3])
cloud = o3d.geometry.PointCloud()
cloud.points = o3d.utility.Vector3dVector(points)
return cloud
def create_axis(length,grid_size = 0.01):
num = int(length / grid_size)
xmap = np.linspace(0,length,num)
ymap = np.linspace(0,2*length,num)
zmap = np.linspace(0,3*length,num)
x_p = np.vstack([xmap.T,np.zeros((1,num)),np.zeros((1,num))])
y_p = np.vstack([np.zeros((1,num)),ymap.T,np.zeros((1,num))])
z_p = np.vstack([np.zeros((1,num)),np.zeros((1,num)),zmap.T])
p = np.hstack([x_p,y_p,z_p])
# print('p',p.shape)
cloud = o3d.geometry.PointCloud()
cloud.points = o3d.utility.Vector3dVector(p.T)
return cloud
def plot_axis(R,center,length,grid_size = 0.01):
num = int(length / grid_size)
xmap = np.linspace(0,length,num)
ymap = np.linspace(0,2*length,num)
zmap = np.linspace(0,3*length,num)
x_p = np.vstack([xmap.T,np.zeros((1,num)),np.zeros((1,num))])
y_p = np.vstack([np.zeros((1,num)),ymap.T,np.zeros((1,num))])
z_p = np.vstack([np.zeros((1,num)),np.zeros((1,num)),zmap.T])
p = np.hstack([x_p,y_p,z_p])
# print('p',p.shape)
p = np.dot(R, p).T + center
cloud = o3d.geometry.PointCloud()
cloud.points = o3d.utility.Vector3dVector(p)
return cloud
def plot_gripper_pro_max(center, R, width, depth, score=1, color=None):
'''
Author: chenxi-wang
**Input:**
- center: numpy array of (3,), target point as gripper center
- R: numpy array of (3,3), rotation matrix of gripper
- width: float, gripper width
- depth: float, gripper depth
- score: float, grasp quality score
- color: tuple of three floats or None, RGB color of the gripper; if None, the color is mapped from score (red for high, blue for low)
**Output:**
- open3d.geometry.TriangleMesh
'''
x, y, z = center
height=0.004
finger_width = 0.004
tail_length = 0.04
depth_base = 0.02
if color is not None:
color_r, color_g, color_b = color
else:
color_r = score # red for high score
color_g = 0
color_b = 1 - score # blue for low score
left = create_mesh_box(depth+depth_base+finger_width, finger_width, height)
right = create_mesh_box(depth+depth_base+finger_width, finger_width, height)
bottom = create_mesh_box(finger_width, width, height)
tail = create_mesh_box(tail_length, finger_width, height)
left_points = np.array(left.vertices)
left_triangles = np.array(left.triangles)
left_points[:,0] -= depth_base + finger_width
left_points[:,1] -= width/2 + finger_width
left_points[:,2] -= height/2
right_points = np.array(right.vertices)
right_triangles = np.array(right.triangles) + 8
right_points[:,0] -= depth_base + finger_width
right_points[:,1] += width/2
right_points[:,2] -= height/2
bottom_points = np.array(bottom.vertices)
bottom_triangles = np.array(bottom.triangles) + 16
bottom_points[:,0] -= finger_width + depth_base
bottom_points[:,1] -= width/2
bottom_points[:,2] -= height/2
tail_points = np.array(tail.vertices)
tail_triangles = np.array(tail.triangles) + 24
tail_points[:,0] -= tail_length + finger_width + depth_base
tail_points[:,1] -= finger_width / 2
tail_points[:,2] -= height/2
vertices = np.concatenate([left_points, right_points, bottom_points, tail_points], axis=0)
vertices = np.dot(R, vertices.T).T + center
triangles = np.concatenate([left_triangles, right_triangles, bottom_triangles, tail_triangles], axis=0)
colors = np.array([ [color_r,color_g,color_b] for _ in range(len(vertices))])
gripper = o3d.geometry.TriangleMesh()
gripper.vertices = o3d.utility.Vector3dVector(vertices)
gripper.triangles = o3d.utility.Vector3iVector(triangles)
gripper.vertex_colors = o3d.utility.Vector3dVector(colors)
return gripper
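# Illustrative usage sketch (not part of the original API): build a gripper
# mesh at the origin with identity orientation. The numbers are made up; the
# call only constructs an open3d TriangleMesh and opens no window.
def _example_gripper_mesh():
    center = np.zeros(3)
    R = np.eye(3)
    gripper = plot_gripper_pro_max(center, R, width=0.06, depth=0.02, score=0.9)
    return gripper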
def find_scene_by_model_id(dataset_root, model_id_list):
picked_scene_names = []
scene_names = ['scene_'+str(i).zfill(4) for i in range(190)]
for scene_name in scene_names:
try:
scene_reader = xmlReader(os.path.join(dataset_root, 'scenes', scene_name, 'kinect', 'annotations', '0000.xml'))
except:
continue
posevectors = scene_reader.getposevectorlist()
for posevector in posevectors:
obj_idx, _ = parse_posevector(posevector)
if obj_idx in model_id_list:
picked_scene_names.append(scene_name)
print(obj_idx, scene_name)
break
return picked_scene_names
def generate_scene(scene_idx, anno_idx, return_poses=False, align=False, camera='realsense'):
camera_poses = np.load(os.path.join('scenes','scene_%04d' %(scene_idx,),camera, 'camera_poses.npy'))
camera_pose = camera_poses[anno_idx]
if align:
align_mat = np.load(os.path.join('camera_poses', '{}_alignment.npy'.format(camera)))
camera_pose = align_mat.dot(camera_pose)
camera_split = 'data' if camera == 'realsense' else 'data_kinect'
# print('Scene {}, {}'.format(scene_idx, camera_split))
scene_reader = xmlReader(os.path.join(scenedir % (scene_idx, camera), 'annotations', '%04d.xml'%(anno_idx)))
posevectors = scene_reader.getposevectorlist()
obj_list = []
mat_list = []
model_list = []
pose_list = []
for posevector in posevectors:
obj_idx, mat = parse_posevector(posevector)
obj_list.append(obj_idx)
mat_list.append(mat)
for obj_idx, mat in zip(obj_list, mat_list):
model = o3d.io.read_point_cloud(os.path.join(modeldir, '%03d'%obj_idx, 'nontextured.ply'))
points = np.array(model.points)
pose = np.dot(camera_pose, mat)
points = transform_points(points, pose)
model.points = o3d.utility.Vector3dVector(points)
model_list.append(model)
pose_list.append(pose)
if return_poses:
return model_list, obj_list, pose_list
else:
return model_list
def get_obj_pose_list(camera_pose, pose_vectors):
import numpy as np
obj_list = []
mat_list = []
pose_list = []
for posevector in pose_vectors:
obj_idx, mat = parse_posevector(posevector)
obj_list.append(obj_idx)
mat_list.append(mat)
for obj_idx, mat in zip(obj_list, mat_list):
pose = np.dot(camera_pose, mat)
pose_list.append(pose)
return obj_list, pose_list
def batch_rgbdxyz_2_rgbxy_depth(points, camera):
'''
**Input:**
- points: np.array(-1,3) of the points in camera frame
- camera: string of the camera type
**Output:**
- coords: numpy array of (x, y) pixel coordinates of shape (-1, 2)
- depths: numpy array of depths in millimeters of shape (-1,)
'''
intrinsics = get_camera_intrinsic(camera)
fx, fy = intrinsics[0,0], intrinsics[1,1]
cx, cy = intrinsics[0,2], intrinsics[1,2]
s = 1000.0
depths = s * points[:,2] # point_z
###################################
# x and y should be inverted here #
###################################
# y = point[0] / point[2] * fx + cx
# x = point[1] / point[2] * fy + cy
# cx = 640, cy = 360
coords_x = points[:,0] / points[:,2] * fx + cx
coords_y = points[:,1] / points[:,2] * fy + cy
coords = np.stack([coords_x, coords_y], axis=-1)
return coords, depths
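# Illustrative usage sketch (not part of the original API): project a couple
# of made-up camera-frame points (in meters) to pixel coordinates and depths
# in millimeters.
def _example_batch_projection(camera='kinect'):
    points = np.array([[0.05, 0.02, 0.50],
                       [-0.10, 0.04, 0.80]])
    coords, depths = batch_rgbdxyz_2_rgbxy_depth(points, camera)
    return coords, depths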
def get_batch_key_points(centers, Rs, widths):
'''
**Input:**
- centers: np.array(-1,3) of the translation
- Rs: np.array(-1,3,3) of the rotation matrix
- widths: np.array(-1) of the grasp width
**Output:**
- key_points: np.array(-1,4,3) of the key point of the grasp
'''
import numpy as np
depth_base = 0.02
height = 0.02
key_points = np.zeros((centers.shape[0],4,3),dtype = np.float32)
key_points[:,:,0] -= depth_base
key_points[:,1:,1] -= widths[:,np.newaxis] / 2
key_points[:,2,2] += height / 2
key_points[:,3,2] -= height / 2
key_points = np.matmul(Rs, key_points.transpose(0,2,1)).transpose(0,2,1)
key_points = key_points + centers[:,np.newaxis,:]
return key_points
def batch_key_points_2_tuple(key_points, scores, object_ids, camera):
'''
**Input:**
- key_points: np.array(-1,4,3) of grasp key points, definition is shown in key_points.png
- scores: numpy array of batch grasp scores.
- object_ids: numpy array of the object ids.
- camera: string of 'realsense' or 'kinect'.
**Output:**
- np.array of shape (-1, 7): [center_x, center_y, open_x, open_y, height, score, object_id] for each grasp
'''
import numpy as np
centers, _ = batch_rgbdxyz_2_rgbxy_depth(key_points[:,0,:], camera)
opens, _ = batch_rgbdxyz_2_rgbxy_depth(key_points[:,1,:], camera)
lefts, _ = batch_rgbdxyz_2_rgbxy_depth(key_points[:,2,:], camera)
rights, _ = batch_rgbdxyz_2_rgbxy_depth(key_points[:,3,:], camera)
heights = np.linalg.norm(lefts - rights, axis=-1, keepdims=True)
tuples = np.concatenate([centers, opens, heights, scores[:, np.newaxis], object_ids[:, np.newaxis]], axis=-1).astype(np.float32)
return tuples
def framexy_depth_2_xyz(pixel_x, pixel_y, depth, camera):
'''
**Input:**
- pixel_x: int of the pixel x coordinate.
- pixel_y: int of the pixel y coordinate.
- depth: float of depth. The unit is millimeter.
- camera: string of type of camera. "realsense" or "kinect".
**Output:**
- x, y, z: float of x, y and z coordinates in camera frame. The unit is millimeter.
'''
intrinsics = get_camera_intrinsic(camera)
fx, fy = intrinsics[0,0], intrinsics[1,1]
cx, cy = intrinsics[0,2], intrinsics[1,2]
z = depth # mm
x = z / fx * (pixel_x - cx) # mm
y = z / fy * (pixel_y - cy) # mm
return x, y, z
def batch_framexy_depth_2_xyz(pixel_x, pixel_y, depth, camera):
'''
**Input:**
- pixel_x: numpy array of int of the pixel x coordinate. shape: (-1,)
- pixel_y: numpy array of int of the pixel y coordinate. shape: (-1,)
- depth: numpy array of float of depth. The unit is millimeter. shape: (-1,)
- camera: string of type of camera. "realsense" or "kinect".
**Output:**
- x, y, z: numpy array of float of x, y and z coordinates in camera frame. The unit is millimeter.
'''
intrinsics = get_camera_intrinsic(camera)
fx, fy = intrinsics[0,0], intrinsics[1,1]
cx, cy = intrinsics[0,2], intrinsics[1,2]
z = depth # mm
x = z / fx * (pixel_x - cx) # mm
y = z / fy * (pixel_y - cy) # mm
return x, y, z
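# Illustrative round-trip sketch (not part of the original API): lift pixels
# with depths in millimeters to camera-frame points, convert to meters, and
# project them back; the recovered pixel coordinates match the input.
def _example_pixel_round_trip(camera='kinect'):
    pixel_x = np.array([640.0, 700.0])
    pixel_y = np.array([360.0, 200.0])
    depth = np.array([600.0, 850.0])  # millimeters
    x, y, z = batch_framexy_depth_2_xyz(pixel_x, pixel_y, depth, camera)
    points = np.stack([x, y, z], axis=-1) / 1000.0  # to meters
    coords, depths = batch_rgbdxyz_2_rgbxy_depth(points, camera)
    assert np.allclose(coords[:, 0], pixel_x) and np.allclose(coords[:, 1], pixel_y)
    assert np.allclose(depths, depth)
    return coords, depths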
def center_depth(depths, center, open_point, upper_point):
'''
**Input:**
- depths: numpy array of the depths.
- center: numpy array of the center point.
- open_point: numpy array of the open point.
- upper_point: numpy array of the upper point.
**Output:**
- depth: float of the grasp depth.
'''
return depths[int(round(center[1])), int(round(center[0]))]
def batch_center_depth(depths, centers, open_points, upper_points):
'''
**Input:**
- depths: numpy array of the depths.
- centers: numpy array of the center points of shape(-1, 2).
- open_points: numpy array of the open points of shape(-1, 2).
- upper_points: numpy array of the upper points of shape(-1, 2).
**Output:**
- depths: numpy array of the grasp depth of shape (-1).
'''
x = np.round(centers[:,0]).astype(np.int32)
y = np.round(centers[:,1]).astype(np.int32)
return depths[y, x]
def key_point_2_rotation(center_xyz, open_point_xyz, upper_point_xyz):
'''
**Input:**
- center_xyz: numpy array of the center point.
- open_point_xyz: numpy array of the open point.
- upper_point_xyz: numpy array of the upper point.
**Output:**
- rotation: numpy array of the rotation matrix.
'''
open_point_vector = open_point_xyz - center_xyz
upper_point_vector = upper_point_xyz - center_xyz
unit_open_point_vector = open_point_vector / np.linalg.norm(open_point_vector)
unit_upper_point_vector = upper_point_vector / np.linalg.norm(upper_point_vector)
rotation = np.hstack((
np.array([[0],[0],[1.0]]),
unit_open_point_vector.reshape((-1, 1)),
unit_upper_point_vector.reshape((-1, 1))
))
return rotation
def batch_key_point_2_rotation(centers_xyz, open_points_xyz, upper_points_xyz):
'''
**Input:**
- centers_xyz: numpy array of the center points of shape (-1, 3).
- open_points_xyz: numpy array of the open points of shape (-1, 3).
- upper_points_xyz: numpy array of the upper points of shape (-1, 3).
**Output:**
- rotations: numpy array of the rotation matrix of shape (-1, 3, 3).
'''
# print('open_points_xyz:{}'.format(open_points_xyz))
# print('upper_points_xyz:{}'.format(upper_points_xyz))
open_points_vector = open_points_xyz - centers_xyz # (-1, 3)
upper_points_vector = upper_points_xyz - centers_xyz # (-1, 3)
open_point_norm = np.linalg.norm(open_points_vector, axis = 1).reshape(-1, 1)
upper_point_norm = np.linalg.norm(upper_points_vector, axis = 1).reshape(-1, 1)
# print('open_point_norm:{}, upper_point_norm:{}'.format(open_point_norm, upper_point_norm))
unit_open_points_vector = open_points_vector / np.hstack((open_point_norm, open_point_norm, open_point_norm)) # (-1, 3)
unit_upper_points_vector = upper_points_vector / np.hstack((upper_point_norm, upper_point_norm, upper_point_norm)) # (-1, 3)
num = open_points_vector.shape[0]
x_axis = np.hstack((np.zeros((num, 1)), np.zeros((num, 1)), np.ones((num, 1)))).astype(np.float32).reshape(-1, 3, 1)
rotations = np.dstack((x_axis, unit_open_points_vector.reshape((-1, 3, 1)), unit_upper_points_vector.reshape((-1, 3, 1))))
return rotations

View File

@@ -0,0 +1,383 @@
import os
import time
import numpy as np
import open3d as o3d
from transforms3d.euler import euler2mat, quat2mat
from .utils import generate_scene_model, generate_scene_pointcloud, generate_views, get_model_grasps, plot_gripper_pro_max, transform_points
from .rotation import viewpoint_params_to_matrix, batch_viewpoint_params_to_matrix
def create_table_cloud(width, height, depth, dx=0, dy=0, dz=0, grid_size=0.01):
'''
Author: chenxi-wang
**Input:**
- width/height/depth: float, table width/height/depth along x/z/y-axis in meters
- dx/dy/dz: float, offset along x/y/z-axis in meters
- grid_size: float, point distance along x/y/z-axis in meters
**Output:**
- open3d.geometry.PointCloud
'''
xmap = np.linspace(0, width, int(width/grid_size))
ymap = np.linspace(0, depth, int(depth/grid_size))
zmap = np.linspace(0, height, int(height/grid_size))
xmap, ymap, zmap = np.meshgrid(xmap, ymap, zmap, indexing='xy')
xmap += dx
ymap += dy
zmap += dz
points = np.stack([xmap, -ymap, -zmap], axis=-1)
points = points.reshape([-1, 3])
cloud = o3d.geometry.PointCloud()
cloud.points = o3d.utility.Vector3dVector(points)
return cloud
def get_camera_parameters(camera='kinect'):
'''
author: Minghao Gou
**Input:**
- camera: string of type of camera: 'kinect' or 'realsense'
**Output:**
- open3d.camera.PinholeCameraParameters
'''
import open3d as o3d
param = o3d.camera.PinholeCameraParameters()
param.extrinsic = np.eye(4,dtype=np.float64)
# param.intrinsic = o3d.camera.PinholeCameraIntrinsic()
if camera == 'kinect':
param.intrinsic.set_intrinsics(1280,720,631.5,631.2,639.5,359.5)
elif camera == 'realsense':
param.intrinsic.set_intrinsics(1280,720,927.17,927.37,639.5,359.5)
return param
def visAnno(dataset_root, scene_name, anno_idx, camera, num_grasp=10, th=0.3, align_to_table=True, max_width=0.08, save_folder='save_fig', show=False, per_obj=False):
'''
Author: chenxi-wang
**Input:**
- dataset_root: str, graspnet dataset root
- scene_name: str, name of scene folder, e.g. scene_0000
- anno_idx: int, frame index from 0-255
- camera: str, camera name (realsense or kinect)
- num_grasp: int, number of sampled grasps
- th: float, threshold of friction coefficient
- align_to_table: bool, transform to table coordinates if set to True
- max_width: float, only visualize grasps with width<=max_width
- save_folder: str, folder to save screen captures
- show: bool, show visualization in open3d window if set to True
- per_obj: bool, show grasps on each object
'''
model_list, obj_list, pose_list = generate_scene_model(dataset_root, scene_name, anno_idx, return_poses=True, align=align_to_table, camera=camera)
point_cloud = generate_scene_pointcloud(dataset_root, scene_name, anno_idx, align=align_to_table, camera=camera)
table = create_table_cloud(1.0, 0.02, 1.0, dx=-0.5, dy=-0.5, dz=0, grid_size=0.01)
num_views, num_angles, num_depths = 300, 12, 4
views = generate_views(num_views)
collision_label = np.load('{}/collision_label/{}/collision_labels.npz'.format(dataset_root,scene_name))
vis = o3d.visualization.Visualizer()
vis.create_window(width = 1280, height = 720)
ctr = vis.get_view_control()
param = get_camera_parameters(camera=camera)
if align_to_table:
cam_pos = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'cam0_wrt_table.npy'))
param.extrinsic = np.linalg.inv(cam_pos).tolist()
grippers = []
vis.add_geometry(point_cloud)
for i, (obj_idx, trans) in enumerate(zip(obj_list, pose_list)):
sampled_points, offsets, scores, _ = get_model_grasps('%s/grasp_label/%03d_labels.npz'%(dataset_root, obj_idx))
collision = collision_label['arr_{}'.format(i)]
cnt = 0
point_inds = np.arange(sampled_points.shape[0])
np.random.shuffle(point_inds)
for point_ind in point_inds:
target_point = sampled_points[point_ind]
offset = offsets[point_ind]
score = scores[point_ind]
view_inds = np.arange(300)
np.random.shuffle(view_inds)
flag = False
for v in view_inds:
if flag: break
view = views[v]
angle_inds = np.arange(12)
np.random.shuffle(angle_inds)
for a in angle_inds:
if flag: break
depth_inds = np.arange(4)
np.random.shuffle(depth_inds)
for d in depth_inds:
if flag: break
angle, depth, width = offset[v, a, d]
if score[v, a, d] > th or score[v, a, d] < 0:
continue
if width > max_width:
continue
if collision[point_ind, v, a, d]:
continue
R = viewpoint_params_to_matrix(-view, angle)
t = transform_points(target_point[np.newaxis,:], trans).squeeze()
R = np.dot(trans[:3,:3], R)
gripper = plot_gripper_pro_max(t, R, width, depth, 1.1-score[v, a, d])
grippers.append(gripper)
flag = True
if flag:
cnt += 1
if cnt == num_grasp:
break
if per_obj:
for gripper in grippers:
vis.add_geometry(gripper)
ctr.convert_from_pinhole_camera_parameters(param)
vis.poll_events()
filename = os.path.join(save_folder, '{}_{}_pointcloud_{}.png'.format(scene_name, camera, obj_idx))
if not os.path.exists(save_folder):
os.mkdir(save_folder)
vis.capture_screen_image(filename, do_render=True)
for gripper in grippers:
vis.remove_geometry(gripper)
grippers = []
if not per_obj:
for gripper in grippers:
vis.add_geometry(gripper)
ctr.convert_from_pinhole_camera_parameters(param)
vis.poll_events()
filename = os.path.join(save_folder, '{}_{}_pointcloud.png'.format(scene_name, camera))
if not os.path.exists(save_folder):
os.mkdir(save_folder)
vis.capture_screen_image(filename, do_render=True)
if show:
o3d.visualization.draw_geometries([point_cloud, *grippers])
vis.remove_geometry(point_cloud)
vis.add_geometry(table)
for model in model_list:
vis.add_geometry(model)
ctr.convert_from_pinhole_camera_parameters(param)
vis.poll_events()
filename = os.path.join(save_folder, '{}_{}_model.png'.format(scene_name, camera))
vis.capture_screen_image(filename, do_render=True)
if show:
o3d.visualization.draw_geometries([table, *model_list, *grippers])
def vis6D(dataset_root, scene_name, anno_idx, camera, align_to_table=True, save_folder='save_fig', show=False, per_obj=False):
'''
**Input:**
- dataset_root: str, graspnet dataset root
- scene_name: str, name of scene folder, e.g. scene_0000
- anno_idx: int, frame index from 0-255
- camera: str, camera name (realsense or kinect)
- align_to_table: bool, transform to table coordinates if set to True
- save_folder: str, folder to save screen captures
- show: bool, show visualization in open3d window if set to True
- per_obj: bool, show pose of each object
'''
model_list, obj_list, pose_list = generate_scene_model(dataset_root, scene_name, anno_idx, return_poses=True, align=align_to_table, camera=camera)
point_cloud = generate_scene_pointcloud(dataset_root, scene_name, anno_idx, align=align_to_table, camera=camera)
point_cloud = point_cloud.voxel_down_sample(voxel_size=0.005)
vis = o3d.visualization.Visualizer()
vis.create_window(width = 1280, height = 720)
ctr = vis.get_view_control()
param = get_camera_parameters(camera=camera)
if align_to_table:
cam_pos = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'cam0_wrt_table.npy'))
param.extrinsic = np.linalg.inv(cam_pos).tolist()
vis.add_geometry(point_cloud)
if per_obj:
for i,model in zip(obj_list,model_list):
vis.add_geometry(model)
ctr.convert_from_pinhole_camera_parameters(param)
vis.poll_events()
filename = os.path.join(save_folder, '{}_{}_6d_{}.png'.format(scene_name, camera, i))
vis.capture_screen_image(filename, do_render=True)
vis.remove_geometry(model)
else:
for model in model_list:
vis.add_geometry(model)
ctr.convert_from_pinhole_camera_parameters(param)
vis.poll_events()
filename = os.path.join(save_folder, '{}_{}_6d.png'.format(scene_name, camera))
vis.capture_screen_image(filename, do_render=True)
if show:
o3d.visualization.draw_geometries([point_cloud, *model_list])
def visObjGrasp(dataset_root, obj_idx, num_grasp=10, th=0.5, max_width=0.08, save_folder='save_fig', show=False):
'''
Author: chenxi-wang
**Input:**
- dataset_root: str, graspnet dataset root
- obj_idx: int, index of object model
- num_grasp: int, number of sampled grasps
- th: float, threshold of friction coefficient
- max_width: float, only visualize grasps with width<=max_width
- save_folder: str, folder to save screen captures
- show: bool, show visualization in open3d window if set to True
'''
plyfile = os.path.join(dataset_root, 'models', '%03d'%obj_idx, 'nontextured.ply')
model = o3d.io.read_point_cloud(plyfile)
num_views, num_angles, num_depths = 300, 12, 4
views = generate_views(num_views)
vis = o3d.visualization.Visualizer()
vis.create_window(width = 1280, height = 720)
ctr = vis.get_view_control()
param = get_camera_parameters(camera='kinect')
cam_pos = np.load(os.path.join(dataset_root, 'scenes', 'scene_0000', 'kinect', 'cam0_wrt_table.npy'))
param.extrinsic = np.linalg.inv(cam_pos).tolist()
sampled_points, offsets, scores, _ = get_model_grasps('%s/grasp_label/%03d_labels.npz'%(dataset_root, obj_idx))
cnt = 0
point_inds = np.arange(sampled_points.shape[0])
np.random.shuffle(point_inds)
grippers = []
for point_ind in point_inds:
target_point = sampled_points[point_ind]
offset = offsets[point_ind]
score = scores[point_ind]
view_inds = np.arange(300)
np.random.shuffle(view_inds)
flag = False
for v in view_inds:
if flag: break
view = views[v]
angle_inds = np.arange(12)
np.random.shuffle(angle_inds)
for a in angle_inds:
if flag: break
depth_inds = np.arange(4)
np.random.shuffle(depth_inds)
for d in depth_inds:
if flag: break
angle, depth, width = offset[v, a, d]
if score[v, a, d] > th or score[v, a, d] < 0 or width > max_width:
continue
R = viewpoint_params_to_matrix(-view, angle)
t = target_point
gripper = plot_gripper_pro_max(t, R, width, depth, 1.1-score[v, a, d])
grippers.append(gripper)
flag = True
if flag:
cnt += 1
if cnt == num_grasp:
break
vis.add_geometry(model)
for gripper in grippers:
vis.add_geometry(gripper)
ctr.convert_from_pinhole_camera_parameters(param)
vis.poll_events()
filename = os.path.join(save_folder, 'object_{}_grasp.png'.format(obj_idx))
vis.capture_screen_image(filename, do_render=True)
if show:
o3d.visualization.draw_geometries([model, *grippers])
def vis_rec_grasp(rec_grasp_tuples,numGrasp,image_path,save_path,show=False):
'''
author: Minghao Gou
**Input:**
- rec_grasp_tuples: np.array of rectangle grasps
- numGrasp: int of the total number of grasps to show
- image_path: string of the path of the input image
- save_path: string of the path to save the image
- show: bool of whether to show the image
**Output:**
- no output, but the image with the rectangle grasps drawn is saved to save_path and optionally shown
'''
import cv2
import numpy as np
import os
img = cv2.imread(image_path)
if len(rec_grasp_tuples) > numGrasp:
np.random.shuffle(rec_grasp_tuples)
rec_grasp_tuples = rec_grasp_tuples[0:numGrasp]
for rec_grasp_tuple in rec_grasp_tuples:
center_x,center_y,open_x,open_y,height,score = rec_grasp_tuple
center = np.array([center_x,center_y])
left = np.array([open_x,open_y])
axis = left - center
normal = np.array([-axis[1],axis[0]])
normal = normal / np.linalg.norm(normal) * height / 2
p1 = center + normal + axis
p2 = center + normal - axis
p3 = center - normal - axis
p4 = center - normal + axis
cv2.line(img, (int(p1[0]),int(p1[1])), (int(p2[0]),int(p2[1])), (0,0,255), 1, 8)
cv2.line(img, (int(p2[0]),int(p2[1])), (int(p3[0]),int(p3[1])), (255,0,0), 3, 8)
cv2.line(img, (int(p3[0]),int(p3[1])), (int(p4[0]),int(p4[1])), (0,0,255), 1, 8)
cv2.line(img, (int(p4[0]),int(p4[1])), (int(p1[0]),int(p1[1])), (255,0,0), 3, 8)
cv2.imwrite(save_path,img)
if show:
cv2.imshow('grasp',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
camera = 'kinect'
dataset_root = '../'
scene_name = 'scene_0000'
anno_idx = 0
obj_idx = 0
visAnno(dataset_root, scene_name, anno_idx, camera, num_grasp=1, th=0.5, align_to_table=True, max_width=0.08, save_folder='save_fig', show=False)
vis6D(dataset_root, scene_name, anno_idx, camera, align_to_table=True, save_folder='save_fig', show=False)
visObjGrasp(dataset_root, obj_idx, num_grasp=10, th=0.5, save_folder='save_fig', show=False)

View File

@@ -0,0 +1,158 @@
__author__ = 'Minghao Gou'
__version__ = '1.0'
from xml.etree.ElementTree import Element, SubElement, tostring
import xml.etree.ElementTree as ET
import xml.dom.minidom
from transforms3d.quaternions import mat2quat, quat2axangle
from transforms3d.euler import quat2euler
import numpy as np
from .trans3d import get_mat, pos_quat_to_pose_4x4
import os
from .pose import pose_list_from_pose_vector_list
class xmlWriter():
def __init__(self, topfromreader=None):
self.topfromreader = topfromreader
self.poselist = []
self.objnamelist = []
self.objpathlist = []
self.objidlist = []
def addobject(self, pose, objname, objpath, objid):
# pose is the 4x4 matrix representation of 6d pose
self.poselist.append(pose)
self.objnamelist.append(objname)
self.objpathlist.append(objpath)
self.objidlist.append(objid)
def objectlistfromposevectorlist(self, posevectorlist, objdir, objnamelist, objidlist):
self.poselist = []
self.objnamelist = []
self.objidlist = []
self.objpathlist = []
for i in range(len(posevectorlist)):
id, x, y, z, alpha, beta, gamma = posevectorlist[i]
objname = objnamelist[objidlist[i]]
self.addobject(get_mat(x, y, z, alpha, beta, gamma),
objname, os.path.join(objdir, objname), id)
def writexml(self, xmlfilename='scene.xml'):
if self.topfromreader is not None:
self.top = self.topfromreader
else:
self.top = Element('scene')
for i in range(len(self.poselist)):
obj_entry = SubElement(self.top, 'obj')
obj_name = SubElement(obj_entry, 'obj_id')
obj_name.text = str(self.objidlist[i])
obj_name = SubElement(obj_entry, 'obj_name')
obj_name.text = self.objnamelist[i]
obj_path = SubElement(obj_entry, 'obj_path')
obj_path.text = self.objpathlist[i]
pose = self.poselist[i]
pose_in_world = SubElement(obj_entry, 'pos_in_world')
pose_in_world.text = '{:.4f} {:.4f} {:.4f}'.format(
pose[0, 3], pose[1, 3], pose[2, 3])
rotationMatrix = pose[0:3, 0:3]
quat = mat2quat(rotationMatrix)
ori_in_world = SubElement(obj_entry, 'ori_in_world')
ori_in_world.text = '{:.4f} {:.4f} {:.4f} {:.4f}'.format(
quat[0], quat[1], quat[2], quat[3])
xmlstr = xml.dom.minidom.parseString(
tostring(self.top)).toprettyxml(indent=' ')
# remove blank line
xmlstr = "".join([s for s in xmlstr.splitlines(True) if s.strip()])
with open(xmlfilename, 'w') as f:
f.write(xmlstr)
#print('log:write annotation file '+xmlfilename)
class xmlReader():
def __init__(self, xmlfilename):
self.xmlfilename = xmlfilename
etree = ET.parse(self.xmlfilename)
self.top = etree.getroot()
def showinfo(self):
print('Resumed object(s) already stored in '+self.xmlfilename+':')
for i in range(len(self.top)):
print(self.top[i][1].text)
def gettop(self):
return self.top
def getposevectorlist(self):
# posevector format: [objectid, x, y, z, alpha, beta, gamma]
posevectorlist = []
for i in range(len(self.top)):
objectid = int(self.top[i][0].text)
objectname = self.top[i][1].text
objectpath = self.top[i][2].text
translationtext = self.top[i][3].text.split()
translation = []
for text in translationtext:
translation.append(float(text))
quattext = self.top[i][4].text.split()
quat = []
for text in quattext:
quat.append(float(text))
alpha, beta, gamma = quat2euler(quat)
x, y, z = translation
alpha *= (180.0 / np.pi)
beta *= (180.0 / np.pi)
gamma *= (180.0 / np.pi)
posevectorlist.append([objectid, x, y, z, alpha, beta, gamma])
return posevectorlist
def get_pose_list(self):
pose_vector_list = self.getposevectorlist()
return pose_list_from_pose_vector_list(pose_vector_list)
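# Illustrative usage sketch (not part of the original API): write a one-object
# annotation with xmlWriter and read it back with xmlReader. The object name,
# path, id and output file name below are hypothetical.
def _example_xml_round_trip(xmlfilename='demo_scene.xml'):
    writer = xmlWriter()
    pose = get_mat(0.1, 0.0, 0.4, 0.0, 0.0, 90.0)  # x, y, z, alpha, beta, gamma (degrees)
    writer.addobject(pose, 'demo_object', 'models/demo_object', 0)
    writer.writexml(xmlfilename)
    reader = xmlReader(xmlfilename)
    return reader.getposevectorlist()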
def empty_pose_vector(objectid):
# [object id,x,y,z,alpha,beta,gamma]
# alpha, beta and gamma are in degree
return [objectid, 0.0, 0.0, 0.4, 0.0, 0.0, 0.0]
def empty_pose_vector_list(objectidlist):
pose_vector_list = []
for id in objectidlist:
pose_vector_list.append(empty_pose_vector(id))
return pose_vector_list
def getposevectorlist(objectidlist, is_resume, num_frame, frame_number, xml_dir):
if not is_resume or (not os.path.exists(os.path.join(xml_dir, '%04d.xml' % num_frame))):
print('log:create empty pose vector list')
return empty_pose_vector_list(objectidlist)
else:
print('log:resume pose vector from ' +
os.path.join(xml_dir, '%04d.xml' % num_frame))
xmlfile = os.path.join(xml_dir, '%04d.xml' % num_frame)
mainxmlReader = xmlReader(xmlfile)
xmlposevectorlist = mainxmlReader.getposevectorlist()
posevectorlist = []
for objectid in objectidlist:
posevector = [objectid, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
for xmlposevector in xmlposevectorlist:
if xmlposevector[0] == objectid:
posevector = xmlposevector
posevectorlist.append(posevector)
return posevectorlist
def getframeposevectorlist(objectidlist, is_resume, frame_number, xml_dir):
frameposevectorlist = []
for num_frame in range(frame_number):
if not is_resume or (not os.path.exists(os.path.join(xml_dir,'%04d.xml' % num_frame))):
posevectorlist=getposevectorlist(objectidlist,False,num_frame,frame_number,xml_dir)
else:
posevectorlist=getposevectorlist(objectidlist,True,num_frame,frame_number,xml_dir)
frameposevectorlist.append(posevectorlist)
return frameposevectorlist

View File

@@ -0,0 +1,36 @@
from setuptools import setup, find_packages
from setuptools.command.install import install
import os
setup(
name='graspnetAPI',
version='1.2.11',
description='graspnet API',
author='Hao-Shu Fang, Chenxi Wang, Minghao Gou',
author_email='gouminghao@gmail.com',
url='https://graspnet.net',
packages=find_packages(),
install_requires=[
'numpy==1.20.3',
'scipy',
'transforms3d==0.3.1',
'open3d>=0.8.0.0',
'trimesh',
'tqdm',
'Pillow',
'opencv-python',
'matplotlib',
'pywavefront',
'scikit-image',
'autolab_core',
'autolab-perception',
'cvxopt',
'dill',
'h5py',
'scikit-learn',
'grasp_nms'
]
)