commit 0ea3f048dc
Date: 2024-10-09 16:13:22 +00:00
437 changed files with 44406 additions and 0 deletions

configs/__init__.py Executable file (empty)

configs/config.py Executable file

@@ -0,0 +1,74 @@
import argparse
import os.path
import shutil

import yaml


class ConfigManager:
    """Static holder for the YAML configuration loaded at startup."""

    config = None
    config_path = None

    @staticmethod
    def get(*args):
        """Walk nested config keys, e.g. get('settings', 'train', 'batch_size')."""
        result = ConfigManager.config
        for arg in args:
            result = result[arg]
        return result

    @staticmethod
    def load_config_with(config_file_path):
        """Load the YAML file at config_file_path into ConfigManager.config."""
        ConfigManager.config_path = config_file_path
        if not os.path.exists(ConfigManager.config_path):
            raise ValueError(f"Config file <{config_file_path}> does not exist")
        with open(config_file_path, 'r') as file:
            ConfigManager.config = yaml.safe_load(file)

    @staticmethod
    def backup_config_to(target_config_dir, file_name, prefix="config"):
        """Copy the active config file into target_config_dir as <prefix>_<file_name>.yaml."""
        file_name = f"{prefix}_{file_name}.yaml"
        target_config_file_path = str(os.path.join(target_config_dir, file_name))
        shutil.copy(ConfigManager.config_path, target_config_file_path)

    @staticmethod
    def load_config():
        """Load the config whose path is given on the command line via --config."""
        parser = argparse.ArgumentParser()
        parser.add_argument('--config', type=str, default='', help='config file path')
        args = parser.parse_args()
        if args.config:
            ConfigManager.load_config_with(args.config)

    @staticmethod
    def print_config(key: str = None, group: dict = None, level=0):
        """Pretty-print the loaded config (or one of its sub-trees) as an indented table."""
        table_size = 80
        if key and group:
            value = group[key]
            if type(value) is dict:
                print("\t" * level + f"+-{key}:")
                for k in value:
                    ConfigManager.print_config(k, value, level=level + 1)
            else:
                print("\t" * level + f"| {key}: {value}")
        elif key:
            ConfigManager.print_config(key, ConfigManager.config, level=level)
        else:
            print("+" + "-" * table_size + "+")
            print(f"| Configurations in <{ConfigManager.config_path}>:")
            print("+" + "-" * table_size + "+")
            for key in ConfigManager.config:
                ConfigManager.print_config(key, level=level + 1)
            print("+" + "-" * table_size + "+")


''' ------------ Debug ------------ '''
if __name__ == "__main__":
    test_args = ['--config', 'local_train_config.yaml']
    test_parser = argparse.ArgumentParser()
    test_parser.add_argument('--config', type=str, default='', help='config file path')
    test_args = test_parser.parse_args(test_args)
    if test_args.config:
        ConfigManager.load_config_with(test_args.config)

    ConfigManager.print_config()
    print()
    pipeline = ConfigManager.get('settings', 'train', 'batch_size')
    ConfigManager.print_config('settings')
    print(pipeline)
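How the rest of the repository calls this class is not shown in this commit; the following is a minimal sketch based only on the methods above and the YAML files added below (the caller script itself is hypothetical; the config path and key names are taken from configs/local_train_config.yaml in this commit):

    # hypothetical caller, not part of this commit
    from configs.config import ConfigManager

    ConfigManager.load_config_with('configs/local_train_config.yaml')
    ConfigManager.print_config()                                     # dump the whole config as a table
    seed = ConfigManager.get('settings', 'general', 'seed')         # nested lookup, e.g. 0
    exp_name = ConfigManager.get('settings', 'experiment', 'name')  # e.g. "test_score_eval"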


@@ -0,0 +1,28 @@
# Preprocess config file
settings:
  general:
    seed: 0
    cuda_visible_devices: 0,1,2,3,4,5,6,7
    device: cuda
    test_dir: ""
    print: True

  experiment:
    name: "gsnet_new_data"
    root_dir: "/mnt/h/BaiduSyncdisk/workspace/ws_active_pose/project/ActivePerception/experiments"
    keep_exists: False

  preprocess:
    dataset_list:
      - source: "nbv1"
        data_type: "sample"
        data_dir: "/mnt/d/Datasets"
        scene_pts_num: 15000
        batch_size: 1
        voxel_size: 0.005

  model:
    general:
      seed_feat_dim: 512
      checkpoint_path: "/mnt/h/BaiduSyncdisk/workspace/ws_active_pose/project/weights/epoch10.tar"
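If the preprocess stage reads this file through ConfigManager, each entry of dataset_list arrives as a plain dict; a minimal sketch of iterating it (the loop and the placeholder path are assumptions, not code from this commit):

    from configs.config import ConfigManager

    # placeholder path: this file's name is not shown in the diff above
    ConfigManager.load_config_with('configs/<this_preprocess_config>.yaml')
    for dataset in ConfigManager.get('settings', 'preprocess', 'dataset_list'):
        # e.g. source="nbv1", data_type="sample", data_dir="/mnt/d/Datasets"
        print(dataset['source'], dataset['data_type'], dataset['data_dir'])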


@@ -0,0 +1,64 @@
# Train config file
settings:
  general:
    seed: 0
    cuda_visible_devices: "0,1,2,3,4,5,6,7"
    device: cuda
    test_dir: ""
    print: True

  experiment:
    name: test_inference
    root_dir: "experiments"
    model_path: "H:\\BaiduSyncdisk\\workspace\\ws_active_pose\\project\\ActivePerception\\experiments\\sample_train_one_scene_overfit_foreground_0_cached\\checkpoints\\Epoch_last.pth"
    use_cache: True
    small_batch_overfit: False

  test:
    batch_size: 96
    dataset_list:
      - name: synthetic_test_sample
        source: nbv1
        data_type: sample
        synthetic: True
        ratio: 1.0
        batch_size: 96
        num_workers: 8

  results:
    save_data_keys: ["target_name", "src_rot_mat"]
    save_output_keys: ["in_process_sample"]

  pipeline: # module_type: name
    pts_encoder: pointnet
    view_finder: gradient_field

  datasets:
    general:
      data_dir: "/mnt/d/Datasets"
      score_limit: 0.3
      target_pts_num: 1024
      scene_pts_num: 16384
      canonical: False

  modules:
    general:
      pts_channels: 3
      feature_dim: 1024
      per_point_feature: False

    pts_encoder:
      pointnet:
      pointnet++:
        params_name: light

    view_finder:
      gradient_field:
        pose_mode: rot_matrix
        regression_head: Rx_Ry
        sample_mode: ode
        sample_repeat: 50
        sampling_steps: 500
        sde_mode: ve

    rgb_encoder:
      dinov2:

configs/local_train_config.yaml Executable file

@@ -0,0 +1,96 @@
# Train config file
settings:
  general:
    seed: 0
    cuda_visible_devices: "0,1,2,3,4,5,6,7"
    device: cuda
    parallel: True
    test_dir: ""
    print: True

  web_api:
    host: "127.0.0.1"
    port: 8888

  experiment:
    name: test_score_eval
    root_dir: "experiments"
    use_checkpoint: False
    epoch: -1 # -1 stands for last epoch
    max_epochs: 5000
    save_checkpoint_interval: 1
    test_first: True
    use_cache: False
    small_batch_overfit: False
    small_batch_size: 100
    small_batch_times: 100

  train:
    optimizer:
      type: adam
      lr: 0.0001
    losses: # loss type : weight
      gf_loss: 1.0
    dataset:
      name: synthetic_train_sample
      source: nbv1
      data_type: sample
      synthetic: True
      ratio: 1.0
      batch_size: 80
      num_workers: 8

  test:
    batch_size: 16
    frequency: 1
    dataset_list:
      - name: synthetic_test_sample
        source: nbv1
        data_type: sample
        synthetic: True
        eval_list:
          - delta_pose
          - grasp_improvement
        ratio: 0.01
        batch_size: 16
        num_workers: 8

  pipeline: # module_type: name
    pts_encoder: pointnet
    view_finder: gradient_field
    rgb_encoder: dinov2

  datasets:
    general:
      data_dir: "/mnt/d/Datasets"
      score_limit: 0.3
      target_pts_num: 1024
      scene_pts_num: 16384
      canonical: False
      image_size: 480

  modules:
    general:
      pts_channels: 3
      feature_dim: 1024
      per_point_feature: False

    pts_encoder:
      pointnet:
      pointnet++:
        params_name: light
      pointnet++rgb:
        params_name: light
        target_layer: 3
        rgb_feat_dim: 384

    view_finder:
      gradient_field:
        pose_mode: rot_matrix
        regression_head: Rx_Ry
        sample_mode: ode
        sample_repeat: 50
        sampling_steps: 500
        sde_mode: ve

    rgb_encoder:
      dinov2:
        model_name: "dinov2_vits14"
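The pipeline block names which implementation fills each module slot, and the per-implementation parameters sit under modules with the same slot names. A minimal sketch of how the two blocks would plausibly be joined (the lookup pattern is an assumption, not code from this commit):

    from configs.config import ConfigManager

    ConfigManager.load_config_with('configs/local_train_config.yaml')
    # e.g. 'gradient_field'
    view_finder_name = ConfigManager.get('settings', 'pipeline', 'view_finder')
    # parameters of the chosen implementation, e.g. pose_mode, sample_mode, sde_mode
    view_finder_params = ConfigManager.get('settings', 'modules', 'view_finder', view_finder_name)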


@@ -0,0 +1,19 @@
# Train config file
settings:
  general:
    seed: 0
    cuda_visible_devices: "0,1,2,3,4,5,6,7"
    device: cuda
    test_dir: ""
    print: True

  experiment:
    name: test_view_generator
    root_dir: "experiments"

  web_api:
    port: 8888

  dataset:
    data_dir: "/mnt/d/Datasets"


@@ -0,0 +1,28 @@
# Preprocess config file
settings:
  general:
    seed: 0
    cuda_visible_devices: 0,1,2,3,4,5,6,7
    device: cuda
    test_dir: ""

  experiment:
    name: "new_gsnet_full_preprocess_test"
    root_dir: "experiments"
    keep_exists: False

  preprocess:
    dataset_list:
      - source: "nbv1"
        data_type: "test"
        data_dir: "../data"
        source: nbv1
        batch_size: 1
        scene_pts_num: 15000
        voxel_size: 0.005

  model:
    general:
      seed_feat_dim: 512
      checkpoint_path: "../weights/epoch10.tar"


@@ -0,0 +1,26 @@
# Preprocess config file
settings:
  general:
    seed: 0
    cuda_visible_devices: 0,1,2,3,4,5,6,7
    device: cuda
    test_dir: ""

  experiment:
    name: "foundationpose_preprocess_test"
    root_dir: "experiments"
    keep_exists: False

  preprocess:
    dataset_list:
      - source: "nbv1"
        data_type: "sample"
        data_dir: "../data"
        source: nbv1
        batch_size: 1
        voxel_size: 0.005

  web_server:
    host: "127.0.0.1"
    port: 12345


@@ -0,0 +1,26 @@
# Preprocess config file
settings:
  general:
    seed: 0
    cuda_visible_devices: 0,1,2,3,4,5,6,7
    device: cuda
    test_dir: ""

  experiment:
    name: "rgb_feat_preprocessor_test"
    root_dir: "experiments"
    keep_exists: True

  preprocess:
    dataset_list:
      - source: "nbv1"
        data_type: "sample"
        data_dir: "../data"
        source: nbv1
        batch_size: 128
        image_size: 480

  model:
    general:
      model_name: "dinov2_vits14"

configs/server_train_config.yaml Executable file

@@ -0,0 +1,118 @@
# Train config file
settings:
  general:
    seed: 0
    cuda_visible_devices: "0,1,2,3,4,5,6,7"
    device: cuda
    parallel: True
    test_dir: ""
    print: True

  experiment:
    name: new_full_training_test_using_fulldataset
    root_dir: "experiments"
    use_checkpoint: True
    epoch: -1 # -1 stands for last epoch
    max_epochs: 5000
    save_checkpoint_interval: 1
    test_first: True
    use_cache: False
    small_batch_overfit: False
    small_batch_size: 100
    small_batch_times: 100
    grasp_model_path: ../weights/epoch10.tar
    task: grasp_pose

  web_api:
    host: "127.0.0.1"
    port: 12345

  train:
    optimizer:
      type: adam
      lr: 0.00001
    losses: # loss type : weight
      gf_loss: 1.0
    dataset:
      name: synthetic_train_train_dataset
      source: nbv1
      data_type: train
      gsnet_label: train_gsnet_label
      #foundation_pose_label: test_foundation_pose_label
      synthetic: True
      ratio: 0.05
      batch_size: 128
      num_workers: 48

  test:
    batch_size: 16
    frequency: 3
    dataset_list:
      - name: synthetic_test_train_dataset
        source: nbv1
        data_type: train
        gsnet_label: train_gsnet_label
        #foundation_pose_label: sample_foundation_pose_label
        synthetic: True
        eval_list:
          - delta_pose
          - grasp_pose_improvement
          #- object_pose_improvement
        ratio: 0.000001
        batch_size: 32
        num_workers: 16

      - name: synthetic_test_test_dataset
        source: nbv1
        data_type: test
        gsnet_label: test_gsnet_label
        #foundation_pose_label: sample_foundation_pose_label
        synthetic: True
        eval_list:
          - delta_pose
          - grasp_pose_improvement
          #- object_pose_improvement
        ratio: 0.000010
        batch_size: 32
        num_workers: 16

  pipeline: # module_type: name
    pts_encoder: pointnet
    view_finder: gradient_field
    #rgb_encoder: dinov2

  datasets:
    general:
      data_dir: "../data"
      score_limit: 0.2
      target_pts_num: 1024
      scene_pts_num: 16384
      canonical: False
      image_size: 480
      rgb_feat_cache: True

  modules:
    general:
      pts_channels: 3
      feature_dim: 1024
      per_point_feature: False

    pts_encoder:
      pointnet:
      pointnet++:
        params_name: light
      pointnet++rgb:
        params_name: light
        target_layer: 3
        rgb_feat_dim: 384

    view_finder:
      gradient_field:
        pose_mode: rot_matrix
        regression_head: Rx_Ry
        sample_mode: ode
        sample_repeat: 50
        sampling_steps: 500
        sde_mode: ve

    rgb_encoder:
      dinov2:
        model_name: "dinov2_vits14"
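experiment.root_dir and experiment.name give the output location, and ConfigManager.backup_config_to can snapshot the active YAML there. A minimal sketch of that combination (the directory layout is an assumption, not code from this commit):

    import os
    from configs.config import ConfigManager

    ConfigManager.load_config_with('configs/server_train_config.yaml')
    exp_dir = os.path.join(ConfigManager.get('settings', 'experiment', 'root_dir'),
                           ConfigManager.get('settings', 'experiment', 'name'))
    os.makedirs(exp_dir, exist_ok=True)
    ConfigManager.backup_config_to(exp_dir, 'train')   # writes <exp_dir>/config_train.yaml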


@@ -0,0 +1,19 @@
# Train config file
settings:
  general:
    seed: 0
    cuda_visible_devices: "0,1,2,3,4,5,6,7"
    device: cuda
    test_dir: ""
    print: True

  experiment:
    name: test_view_generator
    root_dir: "experiments"

  web_api:
    port: 12348

  dataset:
    data_dir: "../data"