fixing et al.

Author: yjinzero
Date:   2025-02-15 17:04:25 +09:00
Parent: 412e1c8579
Commit: 2376b94367
5 changed files with 121 additions and 38 deletions

View File

@@ -61,9 +61,12 @@ class ActToDof:
q_lab = obs[..., 32:61]
q_pin = np.zeros_like(self.ikctrl.cfg.q)
q_pin[self.pin_from_lab] = q_lab
q_pin[self.pin_from_lab] += np.asarray(self.config.lab_joint_offsets)
q_mot = np.zeros(29)
q_mot[self.mot_from_lab] = q_lab
q_mot[self.mot_from_lab] += np.asarray(self.config.lab_joint_offsets)
#print('q_mot (inside)', q_mot)
# print('q0', q_mot[self.mot_from_arm])
# q_mot[i_mot] = q_lab[ lab_from_mot[i_mot] ]
@@ -71,6 +74,7 @@ class ActToDof:
d_quat = quat_from_angle_axis(
torch.from_numpy(hands_command[..., 3:])
).detach().cpu().numpy()
# print('d_quat', d_quat)
source_pose = self.ikctrl.fk(q_pin)
source_xyz = source_pose.translation
@@ -81,11 +85,13 @@ class ActToDof:
torch.from_numpy(d_quat),
torch.from_numpy(source_quat)).detach().cpu().numpy()
target = np.concatenate([target_xyz, target_quat])
# print('target', target)
# print('q_pin', q_pin)
res_q_ik = self.ikctrl(
q_pin,
target
)
# print('res_q_ik', res_q_ik)
# q_pin2 = np.copy(q_pin)
# q_pin2[self.pin_from_arm] += res_q_ik
@@ -102,22 +108,24 @@ class ActToDof:
target_dof_pos = np.zeros(29)
target_dof_pos += q_mot
target_dof_pos[self.mot_from_arm] += res_q_ik
target_dof_pos[self.mot_from_arm] += np.clip(
0.3 * left_arm_residual,
-0.2, 0.2)
# print('default joint pos', self.default_nonarm)
# print('joint order', self.config.non_arm_joint)
# print('mot_from_nonarm', self.mot_from_nonarm)
target_dof_pos[self.mot_from_nonarm] = (
self.default_nonarm + 0.5 * non_arm_joint_pos
)
if True:
target_dof_pos[self.mot_from_arm] += np.clip(
0.3 * left_arm_residual,
-0.2, 0.2)
target_dof_pos[self.mot_from_arm] = np.clip(
target_dof_pos[self.mot_from_arm],
self.lim_lo_pin[self.pin_from_arm],
self.lim_hi_pin[self.pin_from_arm]
)
# print('default joint pos', self.default_nonarm)
# print('joint order', self.config.non_arm_joint)
# print('mot_from_nonarm', self.mot_from_nonarm)
target_dof_pos[self.mot_from_nonarm] = (
self.default_nonarm + 0.5 * non_arm_joint_pos
)
target_dof_pos[self.mot_from_arm] = np.clip(
target_dof_pos[self.mot_from_arm],
self.lim_lo_pin[self.pin_from_arm],
self.lim_hi_pin[self.pin_from_arm]
)
return target_dof_pos
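
A condensed sketch of the action-to-DOF mapping after this change, with the class attributes above reduced to plain arguments (lim_lo_arm / lim_hi_arm stand in for self.lim_lo_pin[self.pin_from_arm] and its upper-limit counterpart); it illustrates the ordering of the steps, not the exact implementation:

import numpy as np

def act_to_dof_sketch(q_mot, res_q_ik, left_arm_residual,
                      non_arm_joint_pos, default_nonarm,
                      mot_from_arm, mot_from_nonarm,
                      lim_lo_arm, lim_hi_arm):
    # start from the offset-corrected motor-space joint angles
    target = np.copy(q_mot)
    # absolute IK solution plus a small, clipped learned residual for the arm
    target[mot_from_arm] += res_q_ik
    target[mot_from_arm] += np.clip(0.3 * left_arm_residual, -0.2, 0.2)
    # non-arm joints track the default pose plus a scaled policy action
    target[mot_from_nonarm] = default_nonarm + 0.5 * non_arm_joint_pos
    # finally, clamp the arm joints to the URDF position limits
    target[mot_from_arm] = np.clip(target[mot_from_arm], lim_lo_arm, lim_hi_arm)
    return target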

View File

@@ -102,20 +102,26 @@ all_joint2motor_idx: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
kps: [
100, 100, 100, 150, 40, 40, 100, 100, 100, 150, 40, 40,
150, 150, 150,
100, 100, 50, 50, 20, 20, 20,
100, 100, 50, 50, 20, 20, 20
# 100, 100, 50, 50, 20, 20, 20,
# 100, 100, 50, 50, 20, 20, 20
40, 40, 40, 40, 20, 20, 20,
40, 40, 40, 40, 20, 20, 20,
]
kds: [
# left leg
2, 2, 2, 4, 2, 2,
# 2, 2, 2, 4, 2, 2,
5, 5, 5, 5, 5, 5,
# right leg
2, 2, 2, 4, 2, 2,
# 2, 2, 2, 4, 2, 2,
5, 5, 5, 5, 5, 5,
# waist
3, 3, 3,
# left arm
2, 2, 2, 2, 1, 1, 1,
# 2, 2, 2, 2, 1, 1, 1,
10, 10, 10, 10, 10, 10, 10,
# right arm
2, 2, 2, 2, 1, 1, 1
# 2, 2, 2, 2, 1, 1, 1
10, 10, 10, 10, 10, 10, 10,
]
lab_joint_offsets: [
-0.2000, -0.2000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
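
For context, a hedged sketch of how a gain table like the one above is typically pushed to the motors, following the low_cmd.motor_cmd[i].kp / .kd pattern that appears in the deploy code further down; the length check is only an illustrative sanity test, not something the repo necessarily does:

def apply_gains_sketch(low_cmd, kps, kds, all_joint2motor_idx):
    # one stiffness/damping pair per controlled joint, routed through the motor index map
    assert len(kps) == len(kds) == len(all_joint2motor_idx)
    for j, motor_idx in enumerate(all_joint2motor_idx):
        low_cmd.motor_cmd[motor_idx].kp = kps[j]
        low_cmd.motor_cmd[motor_idx].kd = kds[j]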

View File

@@ -9,7 +9,7 @@ lowstate_topic: "rt/lowstate"
policy_path: "{LEGGED_GYM_ROOT_DIR}/deploy/pre_train/g1/policy.pt"
act_joint: [
arm_joint: [
"left_shoulder_pitch_joint",
"left_shoulder_roll_joint",
"left_shoulder_yaw_joint",

View File

@@ -3,6 +3,7 @@ from typing import Union, List
import numpy as np
import time
import torch
import torch as th
from pathlib import Path
import rclpy as rp
@@ -70,6 +71,10 @@ def axis_angle_from_quat(quat: np.ndarray, eps: float = 1.0e-6) -> np.ndarray:
)
return quat[..., 1:4] / sin_half_angles_over_angles[..., None]
def quat_conjugate(q):
return np.concatenate(
(q[..., 0:1], -q[..., 1:]),
axis=-1)
def quat_from_angle_axis(
angle: torch.Tensor,
@@ -486,7 +491,7 @@ class Observation:
hand_pose,
projected_com,
joint_pos,
joint_vel,
0.0*joint_vel,
actions,
hands_command,
right_arm_com,
@@ -551,6 +556,17 @@ class Controller:
# FIXME(ycho): give `root_state_w`
self.eetrack = None
if True:
q_mot = np.array(config.default_angles)
q_pin = np.zeros_like(self.ikctrl.cfg.q)
q_pin[self.pin_from_mot] = q_mot
default_pose = self.ikctrl.fk(q_pin)
xyz = default_pose.translation
quat_wxyz = xyzw2wxyz(pin.Quaternion(default_pose.rotation).coeffs())
self.default_pose = np.concatenate([xyz, quat_wxyz])
self.target_pose = np.copy(self.default_pose)
print('default_pose', self.default_pose)
if config.msg_type == "hg":
# g1 and h1_2 use the hg msg type
@@ -579,7 +595,7 @@ class Controller:
elif config.msg_type == "go":
init_cmd_go(self.low_cmd, weak_motor=self.config.weak_motor)
self.mode = Mode.policy
self.mode = Mode.wait
self._mode_change = True
self._timer = self._node.create_timer(
self.config.control_dt, self.run_wrapper)
@@ -708,6 +724,9 @@ class Controller:
self.cmd[1] = self.remote_controller.lx * -1
self.cmd[2] = self.remote_controller.rx * -1
if True:
self.target_pose[..., :3] += 0.01 * self.cmd
# FIXME(ycho): implement `_hands_command_`
# to use the output of `eetrack`.
@@ -734,8 +753,36 @@ class Controller:
self.eetrack = eetrack(torch.from_numpy(root_state_w)[None],
self.tf_buffer)
_hands_command_ = self.eetrack.get_command(
torch.from_numpy(root_state_w)[None])[0].detach().cpu().numpy()
if False:
_hands_command_ = self.eetrack.get_command(
torch.from_numpy(root_state_w)[None])[0].detach().cpu().numpy()
else:
_hands_command_ = np.zeros(6)
#_hands_command_[0] = self.cmd[0] * 0.03
#_hands_command_[2] = self.cmd[1] * 0.03
if True:
q_mot = [self.low_state.motor_state[i_mot].q for i_mot in range(29)]
# print('q_mot (out)', q_mot)
q_pin = np.zeros_like(self.ikctrl.cfg.q)
q_pin[self.pin_from_mot] = q_mot
current_pose = self.ikctrl.fk(q_pin)
_hands_command_ = np.zeros(6)
_hands_command_[0:3] = (self.target_pose[:3]
- current_pose.translation)
quat_wxyz = xyzw2wxyz(pin.Quaternion(
current_pose.rotation).coeffs())
# q_target @ q_current^{-1}
d_quat = quat_mul(
torch.from_numpy(self.target_pose[3:7]),
torch.from_numpy(quat_conjugate(quat_wxyz))
).detach().cpu().numpy()
d_axa = axis_angle_from_quat(d_quat)
_hands_command_[3:6] = d_axa
# bprint('hands_command', _hands_command_)
# print(_hands_command_)
# _hands_command_ = np.zeros(6)
# _hands_command_[0] = self.cmd[0] * 0.03
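
The block above builds the 6-DoF hands command as a pose error between self.target_pose and the pose returned by forward kinematics: a position difference plus the axis-angle of q_target composed with the conjugate of q_current. A minimal numpy-only sketch of that computation, assuming wxyz quaternion order and unit quaternions (it skips the shortest-path handling that axis_angle_from_quat provides):

import numpy as np

def quat_mul_wxyz(a, b):
    # Hamilton product of two wxyz quaternions
    w1, x1, y1, z1 = a
    w2, x2, y2, z2 = b
    return np.array([w1*w2 - x1*x2 - y1*y2 - z1*z2,
                     w1*x2 + x1*w2 + y1*z2 - z1*y2,
                     w1*y2 - x1*z2 + y1*w2 + z1*x2,
                     w1*z2 + x1*y2 - y1*x2 + z1*w2])

def pose_error_sketch(target_xyz, target_quat, current_xyz, current_quat, eps=1.0e-6):
    # orientation error q_target * conj(q_current), converted to an axis-angle vector
    conj = np.concatenate([current_quat[:1], -current_quat[1:]])
    d_quat = quat_mul_wxyz(target_quat, conj)
    angle = 2.0 * np.arccos(np.clip(d_quat[0], -1.0, 1.0))
    axis_norm = np.linalg.norm(d_quat[1:])
    d_axa = angle * d_quat[1:] / axis_norm if axis_norm > eps else np.zeros(3)
    return np.concatenate([target_xyz - current_xyz, d_axa])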
@@ -751,12 +798,31 @@ class Controller:
# Get the action from the policy network
obs_tensor = torch.from_numpy(self.obs).unsqueeze(0)
obs_tensor = obs_tensor.detach().clone()
# hands_command = obs[..., 119:125]
world_from_body_quat = math_utils.quat_from_euler_xyz(
th.as_tensor([0], dtype=th.float32),
th.as_tensor([0], dtype=th.float32),
th.as_tensor([1.57], dtype=th.float32)).reshape(4)
obs_tensor[..., 119:122] = math_utils.quat_rotate(
world_from_body_quat[None],
obs_tensor[..., 119:122])
obs_tensor[..., 122:125] = math_utils.quat_rotate(
world_from_body_quat[None],
obs_tensor[..., 122:125])
self.action = self.policy(obs_tensor).detach().numpy().squeeze()
# np.save(F'{logpath}/act{self.counter:03d}.npy',
# self.action)
target_dof_pos = self.actmap(self.obs,
self.action)
target_dof_pos = self.actmap(
self.obs,
self.action
)
# print('??',
# target_dof_pos,
# [self.low_state.motor_state[i_mot].q for i_mot in range(29)])
# np.save(F'{logpath}/dof{self.counter:03d}.npy',
# target_dof_pos)
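
For reference, the quat_rotate calls above apply a fixed yaw of roughly 1.57 rad to the two 3-vector halves of the hands command before the policy sees them. A plain-numpy sketch of the same frame change (the 119:125 slice and the angle come from the code above; the rest is illustrative):

import numpy as np

def rotate_hands_command_sketch(obs, yaw=1.57):
    # rotation about +z by yaw, applied to the position and axis-angle parts of the command
    c, s = np.cos(yaw), np.sin(yaw)
    rot_z = np.array([[c, -s, 0.0],
                      [s,  c, 0.0],
                      [0.0, 0.0, 1.0]])
    obs = np.copy(obs)
    obs[..., 119:122] = obs[..., 119:122] @ rot_z.T
    obs[..., 122:125] = obs[..., 122:125] @ rot_z.T
    return obs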

View File

@@ -30,7 +30,7 @@ class Controller:
self.config = config
self.remote_controller = RemoteController()
act_joint = config.act_joint
act_joint = config.arm_joint
self.ikctrl = IKCtrl('../../resources/robots/g1_description/g1_29dof_with_hand_rev_1_0.urdf',
act_joint)
self.lim_lo_pin = self.ikctrl.robot.model.lowerPositionLimit
@@ -175,20 +175,23 @@ class Controller:
self._dof_idx = dof_idx
# record the current pos
self._init_dof_pos = np.zeros(self._dof_size,
dtype=np.float32)
for i in range(self._dof_size):
self._init_dof_pos[i] = self.low_state.motor_state[dof_idx[i]].q
self._init_dof_pos = np.zeros(29)
for i in range(29):
self._init_dof_pos[i] = self.low_state.motor_state[i].q
def move_to_default_pos(self):
# move to default pos
if self.counter < self._num_step:
alpha = self.counter / self._num_step
for j in range(self._dof_size):
motor_idx = self._dof_idx[j]
target_pos = self._default_pos[j]
self.low_cmd.motor_cmd[motor_idx].q = (self._init_dof_pos[j] *
(1 - alpha) + target_pos * alpha)
#for j in range(self._dof_size):
for j in range(29):
# motor_idx = self._dof_idx[j]
# target_pos = self._default_pos[j]
motor_idx = j
target_pos = self.config.default_angles[j]
self.low_cmd.motor_cmd[motor_idx].q = (
self._init_dof_pos[j] * (1 - alpha) + target_pos * alpha)
self.low_cmd.motor_cmd[motor_idx].dq = 0.0
self.low_cmd.motor_cmd[motor_idx].kp = self._kps[j]
self.low_cmd.motor_cmd[motor_idx].kd = self._kds[j]
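
A minimal sketch of the blend performed in move_to_default_pos after this change: all 29 motors are interpolated from their measured start angles to config.default_angles over _num_step control ticks (names mirror the code above; the clamp on alpha is an illustrative addition):

import numpy as np

def blend_to_default_sketch(counter, num_step, init_dof_pos, default_angles):
    # linear blend from the recorded start pose toward the configured default pose
    alpha = min(counter / num_step, 1.0)
    return (1.0 - alpha) * np.asarray(init_dof_pos) + alpha * np.asarray(default_angles)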