added hopejr yaml

commit c70054c0b9
parent 62b1896304
@@ -404,9 +404,7 @@ class OpenCVCamera:
         h, w, _ = color_image.shape
         if h != self.height or w != self.width:
-            raise OSError(
-                f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
-            )
+            color_image = cv2.resize(color_image, (640, 480))
 
         if self.rotation is not None:
             color_image = cv2.rotate(color_image, self.rotation)
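Note: a frame that doesn't match the configured size is now silently resized instead of raising. A standalone sketch of the new behavior (`ensure_frame_size` is a hypothetical helper, not part of this commit); the subtlety is that `cv2.resize` takes `dsize` as `(width, height)` while `.shape` yields `(height, width, channels)`:

```python
import cv2
import numpy as np

def ensure_frame_size(frame: np.ndarray, width: int = 640, height: int = 480) -> np.ndarray:
    """Resize a captured frame to the expected size instead of raising OSError."""
    h, w = frame.shape[:2]
    if (h, w) != (height, width):
        # cv2.resize expects dsize=(width, height), not (height, width).
        frame = cv2.resize(frame, (width, height))
    return frame

# Example: a 720p frame is coerced to the 640x480 the pipeline expects.
frame = np.zeros((720, 1280, 3), dtype=np.uint8)
print(ensure_frame_size(frame).shape)  # (480, 640, 3)
```

Hardcoding `(640, 480)` ignores `self.width` and `self.height`; `cv2.resize(color_image, (self.width, self.height))` would keep the check and the resize consistent at other resolutions.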
@@ -41,7 +41,7 @@ def log_control_info(robot: Robot, dt_s, episode_index=None, frame_index=None, f
 
     # total step time displayed in milliseconds and its frequency
     log_dt("dt", dt_s)
-
+    return
     # TODO(aliberts): move robot-specific logs logic in robot.print_logs()
     if not robot.robot_type.startswith("stretch"):
         for name in robot.leader_arms:
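The `return` dropped in here exits right after the total step time is logged, so everything below it (the robot-specific and per-arm logs) becomes unreachable dead code. If the aim is to silence the detail while keeping it available, a flag is a common alternative — sketch only; `verbose` and the `log_dt` stand-in are hypothetical, not part of this commit:

```python
def log_dt(shortname, dt_val_s):
    # Stand-in for lerobot's log_dt; per the comment above, it shows
    # the step time in milliseconds and its frequency.
    print(f"{shortname}: {dt_val_s * 1000:5.2f} ms ({1 / dt_val_s:.1f} Hz)")

def log_control_info(robot, dt_s, fps=None, verbose=False):
    log_dt("dt", dt_s)   # always log the total step time
    if not verbose:
        return           # same effect as the hard return in the diff
    # robot-specific / per-arm logs would continue here, as in the original body
```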
@@ -254,7 +254,6 @@ def control_loop(
     start_episode_t = time.perf_counter()
     while timestamp < control_time_s:
         start_loop_t = time.perf_counter()
-
         if teleoperate:
             observation, action = robot.teleop_step(record_data=True)
         else:
@@ -281,7 +280,7 @@ def control_loop(
             busy_wait(1 / fps - dt_s)
 
         dt_s = time.perf_counter() - start_loop_t
-        log_control_info(robot, dt_s, fps=fps)
+        #log_control_info(robot, dt_s, fps=fps)
 
         timestamp = time.perf_counter() - start_episode_t
         if events["exit_early"]:
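For context, the timing pattern around `busy_wait` holds the loop at the target rate: each iteration measures its own duration and waits out the remainder of the `1 / fps` period. A minimal self-contained sketch (the `busy_wait` here is an illustrative spin-wait, not lerobot's implementation):

```python
import time

def busy_wait(seconds: float) -> None:
    # Illustrative spin-wait; more precise than time.sleep() for short delays.
    end = time.perf_counter() + seconds
    while time.perf_counter() < end:
        pass

fps, control_time_s = 30, 1.0
start_episode_t = time.perf_counter()
timestamp = 0.0
while timestamp < control_time_s:
    start_loop_t = time.perf_counter()
    # ... one teleop or policy step would run here ...
    dt_s = time.perf_counter() - start_loop_t
    if dt_s < 1 / fps:
        busy_wait(1 / fps - dt_s)  # pad the iteration out to the full period
    timestamp = time.perf_counter() - start_episode_t
```

Commenting out `log_control_info` removes the per-step console output; printing itself takes time, so this can also steady the loop period.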
[File diff suppressed because it is too large]
@@ -0,0 +1,97 @@
+# @package _global_
+
+# Use `act_koch_real.yaml` to train on real-world datasets collected on Alexander Koch's robots.
+# Compared to `act.yaml`, it contains 2 cameras (i.e. laptop, phone) instead of 1 camera (i.e. top).
+# Also, `training.eval_freq` is set to -1. This parameter is used to evaluate checkpoints at a certain frequency of training steps.
+# When it is set to -1, it deactivates evaluation. This is because real-world evaluation is done through our `control_robot.py` script.
+# Look at the documentation in the header of `control_robot.py` for more information on how to collect data, train and evaluate a policy.
+#
+# Example of usage for training:
+# ```bash
+# python lerobot/scripts/train.py \
+#   policy=act_koch_real \
+#   env=koch_real
+# ```
+
+seed: 1000
+dataset_repo_id: nepyope/hopejr_test
+
+override_dataset_stats:
+
+  observation.images.phone:
+    # stats from imagenet, since we use a pretrained vision model
+    mean: [[[0.485]], [[0.456]], [[0.406]]]  # (c,1,1)
+    std: [[[0.229]], [[0.224]], [[0.225]]]  # (c,1,1)
+
+training:
+  offline_steps: 80000
+  online_steps: 0
+  eval_freq: -1
+  save_freq: 10000
+  log_freq: 100
+  save_checkpoint: true
+
+  batch_size: 8
+  lr: 1e-5
+  lr_backbone: 1e-5
+  weight_decay: 1e-4
+  grad_clip_norm: 10
+  online_steps_between_rollouts: 1
+
+  delta_timestamps:
+    action: "[i / ${fps} for i in range(${policy.chunk_size})]"
+
+eval:
+  n_episodes: 50
+  batch_size: 50
+
+# See `configuration_act.py` for more details.
+policy:
+  name: act
+
+  # Input / output structure.
+  n_obs_steps: 1
+  chunk_size: 100
+  n_action_steps: 100
+
+  input_shapes:
+    # TODO(rcadene, alexander-soare): add variables for height and width from the dataset/env?
+    observation.images.phone: [3, 480, 640]
+    observation.state: ["${env.state_dim}"]
+  output_shapes:
+    action: ["${env.action_dim}"]
+
+  # Normalization / Unnormalization
+  input_normalization_modes:
+    observation.images.phone: mean_std
+    observation.state: mean_std
+  output_normalization_modes:
+    action: mean_std
+
+  # Architecture.
+  # Vision backbone.
+  vision_backbone: resnet18
+  pretrained_backbone_weights: ResNet18_Weights.IMAGENET1K_V1
+  replace_final_stride_with_dilation: false
+  # Transformer layers.
+  pre_norm: false
+  dim_model: 512
+  n_heads: 8
+  dim_feedforward: 3200
+  feedforward_activation: relu
+  n_encoder_layers: 4
+  # Note: Although the original ACT implementation has 7 for `n_decoder_layers`, there is a bug in the code
+  # that means only the first layer is used. Here we match the original implementation by setting this to 1.
+  # See this issue https://github.com/tonyzhaozh/act/issues/25#issue-2258740521.
+  n_decoder_layers: 1
+  # VAE.
+  use_vae: true
+  latent_dim: 32
+  n_vae_encoder_layers: 4
+
+  # Inference.
+  temporal_ensemble_momentum: null
+
+  # Training and loss computation.
+  dropout: 0.1
+  kl_weight: 10.0
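One detail worth unpacking in this new config: the `delta_timestamps.action` string is evaluated into a list of time offsets relative to each sampled frame. A sketch of the expansion, assuming `fps: 30` is resolved from the `env` config (which is not shown in this diff):

```python
fps = 30           # assumption: substituted for ${fps} from the env config
chunk_size = 100   # from policy.chunk_size above

# The YAML string "[i / ${fps} for i in range(${policy.chunk_size})]" expands to:
action_deltas = [i / fps for i in range(chunk_size)]
print(action_deltas[:4])  # [0.0, 0.0333..., 0.0666..., 0.1]
```

So each training sample is paired with the next 100 actions, one per frame — the action chunk that ACT learns to predict.

Likewise, the `(c,1,1)` shapes on `mean`/`std` let the ImageNet statistics broadcast over `(3, H, W)` image tensors. A minimal check (PyTorch assumed):

```python
import torch

mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
image = torch.rand(3, 480, 640)      # float image in [0, 1]
normalized = (image - mean) / std    # broadcasts over H and W
print(normalized.shape)              # torch.Size([3, 480, 640])
```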
@@ -6,14 +6,14 @@
 
 # See [tutorial](https://github.com/huggingface/lerobot/blob/main/examples/10_use_so100.md)
 
-_target_: lerobot.common.robot_devices.robots.manipulator.ManipulatorRobot
-robot_type: hopejr
-calibration_dir: .cache/calibration/hopejr
+_target_: lerobot.common.robot_devices.robots.hopejr.HopeJrRobot
+# robot_type: hopejr
+# calibration_dir: .cache/calibration/hopejr
 
-# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
-# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
-# the number of motors in your follower arms.
-max_relative_target: null
+# # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
+# # Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
+# # the number of motors in your follower arms.
+# max_relative_target: null
 
 # leader_arms:
 #   main:
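The `_target_` swap is the heart of this file: Hydra instantiates whatever class the dotted path names, so pointing it at `HopeJrRobot` changes the robot implementation without touching the calling code. A sketch of how such a config is typically consumed (assuming Hydra's standard `instantiate`; the config path and the exact lerobot factory are assumptions, not shown in this diff):

```python
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.load("lerobot/configs/robot/hopejr.yaml")  # assumed path
robot = instantiate(cfg)  # builds HopeJrRobot, passing the remaining keys as kwargs
```

Commenting out `robot_type`, `calibration_dir`, and `max_relative_target` suggests `HopeJrRobot` does not accept them; with `instantiate`, any uncommented key is forwarded to the constructor and an unexpected one would raise a `TypeError`.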
@@ -28,46 +28,46 @@ max_relative_target: null
 #     wrist_roll: [5, "sts3215"]
 #     gripper: [6, "sts3215"]
 
-follower_arms:
-  main:
-    _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus
-    port: /dev/tty.usbserial-2130
-    motors:
-      # name: (index, model)
-      shoulder_pitch: [1, "sts3250"]
-      shoulder_yaw: [2, "sts3215"] # TODO: sts3250
-      shoulder_roll: [3, "sts3215"] # TODO: sts3250
-      elbow_flex: [4, "sts3250"]
-      wrist_roll: [5, "sts3215"]
-      wrist_yaw: [6, "sts3215"]
-      wrist_pitch: [7, "sts3215"]
-      thumb_basel_rotation: [30, "scs0009"]
-      thumb_flexion: [27, "scs0009"]
-      thumb_pinky_side: [26, "scs0009"]
-      thumb_thumb_side: [28, "scs0009"]
-      index_flexor: [25, "scs0009"]
-      index_pinky_side: [31, "scs0009"]
-      index_thumb_side: [32, "scs0009"]
-      middle_flexor: [24, "scs0009"]
-      middle_pinky_side: [33, "scs0009"]
-      middle_thumb_side: [34, "scs0009"]
-      ring_flexor: [21, "scs0009"]
-      ring_pinky_side: [36, "scs0009"]
-      ring_thumb_side: [35, "scs0009"]
-      pinky_flexor: [23, "scs0009"]
-      pinky_pinky_side: [38, "scs0009"]
-      pinky_thumb_side: [37, "scs0009"]
+# follower_arms:
+#   main:
+#     _target_: lerobot.common.robot_devices.motors.feetech.FeetechMotorsBus
+#     port: /dev/tty.usbserial-2130
+#     motors:
+#       # name: (index, model)
+#       shoulder_pitch: [1, "sts3250"]
+#       shoulder_yaw: [2, "sts3215"] # TODO: sts3250
+#       shoulder_roll: [3, "sts3215"] # TODO: sts3250
+#       elbow_flex: [4, "sts3250"]
+#       wrist_roll: [5, "sts3215"]
+#       wrist_yaw: [6, "sts3215"]
+#       wrist_pitch: [7, "sts3215"]
+#       thumb_basel_rotation: [30, "scs0009"]
+#       thumb_flexion: [27, "scs0009"]
+#       thumb_pinky_side: [26, "scs0009"]
+#       thumb_thumb_side: [28, "scs0009"]
+#       index_flexor: [25, "scs0009"]
+#       index_pinky_side: [31, "scs0009"]
+#       index_thumb_side: [32, "scs0009"]
+#       middle_flexor: [24, "scs0009"]
+#       middle_pinky_side: [33, "scs0009"]
+#       middle_thumb_side: [34, "scs0009"]
+#       ring_flexor: [21, "scs0009"]
+#       ring_pinky_side: [36, "scs0009"]
+#       ring_thumb_side: [35, "scs0009"]
+#       pinky_flexor: [23, "scs0009"]
+#       pinky_pinky_side: [38, "scs0009"]
+#       pinky_thumb_side: [37, "scs0009"]
 
 cameras:
-  laptop:
-    _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
-    camera_index: 0
-    fps: 30
-    width: 640
-    height: 480
-  phone:
-    _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
-    camera_index: 1
-    fps: 30
-    width: 640
-    height: 480
+#   laptop:
+#     _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
+#     camera_index: 0
+#     fps: 30
+#     width: 640
+#     height: 480
+  phone:
+    _target_: lerobot.common.robot_devices.cameras.opencv.OpenCVCamera
+    camera_index: 0
+    fps: 30
+    width: 640
+    height: 480
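After this change only the `phone` camera remains active, now on `camera_index: 0` (the index the commented-out `laptop` entry used), which matches the single `observation.images.phone` input in the training yaml above. A quick standalone check that index 0 actually delivers 640x480 frames, using plain OpenCV rather than the lerobot wrapper:

```python
import cv2

cap = cv2.VideoCapture(0)  # camera_index: 0, as in the config
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
cap.set(cv2.CAP_PROP_FPS, 30)
ok, frame = cap.read()
print(ok, frame.shape if ok else None)  # expect: True (480, 640, 3)
cap.release()
```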
@@ -258,7 +258,7 @@ def record(
     # 3. place the cameras windows on screen
     enable_teleoperation = policy is None
     log_say("Warmup record", play_sounds)
-    warmup_record(robot, events, enable_teleoperation, warmup_time_s, display_cameras, fps)
+    # warmup_record(robot, events, enable_teleoperation, warmup_time_s, display_cameras, fps)
 
     if has_method(robot, "teleop_safety_stop"):
         robot.teleop_safety_stop()
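With `warmup_record` commented out, recording starts immediately after the "Warmup record" announcement, with no teleoperated warm-up window before the first episode. The `has_method` call below it guards the optional safety stop, so only robots that implement `teleop_safety_stop` get it called. A plausible one-liner for such a guard (lerobot defines its own; this is just the idea):

```python
def has_method(obj: object, name: str) -> bool:
    # True when obj exposes a callable attribute with this name.
    return callable(getattr(obj, name, None))
```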