add load config file

Huang Zhenbiao 2024-10-07 11:57:32 +08:00
parent bc8b3ded39
commit 09ec0abde4
9 changed files with 307 additions and 17 deletions

View File

@@ -17,6 +17,8 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")
find_package(Python3 COMPONENTS Interpreter Development REQUIRED)
find_package(Python3 COMPONENTS NumPy)
find_package(yaml-cpp REQUIRED)
set(dependencies
pluginlib
rcpputils
@@ -45,9 +47,11 @@ target_include_directories(${PROJECT_NAME}
"$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>"
"$<INSTALL_INTERFACE:include/${PROJECT_NAME}>"
PRIVATE
${YAML_CPP_INCLUDE_DIR}
src)
target_link_libraries(${PROJECT_NAME} PUBLIC
"${TORCH_LIBRARIES}"
yaml-cpp
)
ament_target_dependencies(
${PROJECT_NAME} PUBLIC

View File

@@ -11,8 +11,11 @@ Tested environment:
### 2.1 Installing libtorch
```bash
cd ~/CLionProjects/
wget https://download.pytorch.org/libtorch/nightly/cpu/libtorch-shared-with-deps-latest.zip
unzip libtorch-shared-with-deps-latest.zip
wget https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-2.0.1%2Bcpu.zip
unzip libtorch-cxx11-abi-shared-with-deps-2.0.1+cpu.zip -d ./
```
```bash
cd ~/CLionProjects/
rm -rf libtorch-cxx11-abi-shared-with-deps-2.0.1+cpu.zip
echo 'export Torch_DIR=~/CLionProjects/libtorch' >> ~/.bashrc
```
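To verify the unpacked libtorch and the `Torch_DIR` export before building the controller, a small stand-alone check can be compiled against it. The file name `torch_check.cpp` below is only an illustration, not part of the repository:
```cpp
// torch_check.cpp: minimal sanity check that libtorch is found and runs.
#include <torch/torch.h>
#include <iostream>

int main() {
    // Creating and printing a random tensor exercises the core library;
    // if this works, the toolchain and Torch_DIR are set up correctly.
    const torch::Tensor t = torch::rand({2, 3});
    std::cout << t << std::endl;
    return 0;
}
```
It can be built with a minimal CMake project using `find_package(Torch REQUIRED)` and linking `${TORCH_LIBRARIES}`, mirroring what this package's CMakeLists.txt already does.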

View File

@@ -10,10 +10,80 @@
#include "FSMState.h"
template<typename T>
struct RobotCommand {
struct MotorCommand {
std::vector<T> q = std::vector<T>(32, 0.0);
std::vector<T> dq = std::vector<T>(32, 0.0);
std::vector<T> tau = std::vector<T>(32, 0.0);
std::vector<T> kp = std::vector<T>(32, 0.0);
std::vector<T> kd = std::vector<T>(32, 0.0);
} motor_command;
};
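// Feedback read back from the robot: IMU measurements plus per-joint state.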
template<typename T>
struct RobotState {
struct IMU {
std::vector<T> quaternion = {1.0, 0.0, 0.0, 0.0}; // w, x, y, z
std::vector<T> gyroscope = {0.0, 0.0, 0.0};
std::vector<T> accelerometer = {0.0, 0.0, 0.0};
} imu;
struct MotorState {
std::vector<T> q = std::vector<T>(32, 0.0);
std::vector<T> dq = std::vector<T>(32, 0.0);
std::vector<T> ddq = std::vector<T>(32, 0.0);
std::vector<T> tauEst = std::vector<T>(32, 0.0);
std::vector<T> cur = std::vector<T>(32, 0.0);
} motor_state;
};
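// Policy hyper-parameters and observation/action scaling factors parsed from config.yaml by loadYaml().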
struct ModelParams {
std::string model_name;
std::string framework;
bool use_history;
double dt;
int decimation;
int num_observations;
std::vector<std::string> observations;
double damping;
double stiffness;
double action_scale;
double hip_scale_reduction;
std::vector<int> hip_scale_reduction_indices;
int num_of_dofs;
double lin_vel_scale;
double ang_vel_scale;
double dof_pos_scale;
double dof_vel_scale;
double clip_obs;
torch::Tensor clip_actions_upper;
torch::Tensor clip_actions_lower;
torch::Tensor torque_limits;
torch::Tensor rl_kd;
torch::Tensor rl_kp;
torch::Tensor fixed_kp;
torch::Tensor fixed_kd;
torch::Tensor commands_scale;
torch::Tensor default_dof_pos;
std::vector<std::string> joint_controller_names;
};
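// Raw observation tensors gathered each control step before scaling and concatenation.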
struct Observations
{
torch::Tensor lin_vel;
torch::Tensor ang_vel;
torch::Tensor gravity_vec;
torch::Tensor commands;
torch::Tensor base_quat;
torch::Tensor dof_pos;
torch::Tensor dof_vel;
torch::Tensor actions;
};
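// FSM state that runs the reinforcement-learning policy exported as a TorchScript model.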
class StateRL final : public FSMState {
public:
explicit StateRL(CtrlComponent &ctrl_component);
explicit StateRL(CtrlComponent &ctrl_component, const std::string &config_path);
void enter() override;
@@ -26,18 +96,18 @@ public:
private:
torch::Tensor computeObservation();
void loadYaml(const std::string &config_path);
static torch::Tensor quatRotateInverse(torch::Tensor q, torch::Tensor v, const std::string& framework);
/**
* @brief Forward the RL model to get the action
*/
torch::Tensor forward();
// Parameters
double linear_vel_scale_;
double angular_vel_scale_;
double clip_obs_;
torch::Tensor clip_actions_upper_;
torch::Tensor clip_actions_lower_;
bool use_history_;
ModelParams params_;
Observations obs;
// history buffer
std::shared_ptr<ObservationBuffer> history_obs_buf_;

View File

@@ -4,8 +4,54 @@
#include "legged_gym_controller/FSM/StateRL.h"
StateRL::StateRL(CtrlComponent &ctrl_component) : FSMState(
#include <rclcpp/logging.hpp>
#include <yaml-cpp/yaml.h>
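// Reads a flat YAML sequence into a std::vector<T>.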
template<typename T>
std::vector<T> ReadVectorFromYaml(const YAML::Node &node) {
std::vector<T> values;
for (const auto &val: node) {
values.push_back(val.as<T>());
}
return values;
}
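// Overload that also reorders a rows x cols matrix stored row-major in the YAML: for
// "isaacsim" the values are transposed to column-major order, for "isaacgym" they are
// returned unchanged. Illustration with rows = 2, cols = 3 and hypothetical values:
//   YAML order   {a, b, c, d, e, f}
//   isaacgym  -> {a, b, c, d, e, f}
//   isaacsim  -> {a, d, b, e, c, f}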
template<typename T>
std::vector<T> ReadVectorFromYaml(const YAML::Node &node, const std::string &framework, const int &rows,
const int &cols) {
std::vector<T> values;
for (const auto &val: node) {
values.push_back(val.as<T>());
}
if (framework == "isaacsim") {
std::vector<T> transposed_values(cols * rows);
for (int r = 0; r < rows; ++r) {
for (int c = 0; c < cols; ++c) {
transposed_values[c * rows + r] = values[r * cols + c];
}
}
return transposed_values;
}
if (framework == "isaacgym") {
return values;
}
throw std::invalid_argument("Unsupported framework: " + framework);
}
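// Reads parameters from <config_path>/config.yaml, sets up the optional observation history buffer and loads the TorchScript model.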
StateRL::StateRL(CtrlComponent &ctrl_component, const std::string &config_path) : FSMState(
FSMStateName::RL, "rl", ctrl_component) {
// read params from yaml
loadYaml(config_path);
// history
if (params_.use_history) {
history_obs_buf_ = std::make_shared<ObservationBuffer>(1, params_.num_observations, 6);
}
model = torch::jit::load(config_path + "/" + params_.model_name);
std::cout << "Model loaded: " << config_path + "/" + params_.model_name << std::endl;
}
void StateRL::enter() {
@@ -31,17 +77,113 @@ FSMStateName StateRL::checkChange() {
torch::Tensor StateRL::computeObservation() {
std::vector<torch::Tensor> obs_list;
const torch::Tensor obs = cat(obs_list, 1);
torch::Tensor clamped_obs = clamp(obs, -clip_obs_, clip_obs_);
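// Assemble the observation vector in the order listed in params_.observations, applying the configured scales.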
for (const std::string &observation: params_.observations) {
if (observation == "lin_vel") {
obs_list.push_back(obs.lin_vel * params_.lin_vel_scale);
} else if (observation == "ang_vel") {
// obs_list.push_back(obs.ang_vel * params_.ang_vel_scale); // TODO: is quatRotateInverse necessary here?
obs_list.push_back(
quatRotateInverse(obs.base_quat, obs.ang_vel, params_.framework) * params_.ang_vel_scale);
} else if (observation == "gravity_vec") {
obs_list.push_back(quatRotateInverse(obs.base_quat, obs.gravity_vec, params_.framework));
} else if (observation == "commands") {
obs_list.push_back(obs.commands * params_.commands_scale);
} else if (observation == "dof_pos") {
obs_list.push_back((obs.dof_pos - params_.default_dof_pos) * params_.dof_pos_scale);
} else if (observation == "dof_vel") {
obs_list.push_back(obs.dof_vel * params_.dof_vel_scale);
} else if (observation == "actions") {
obs_list.push_back(obs.actions);
}
}
torch::Tensor obs = cat(obs_list, 1);
torch::Tensor clamped_obs = clamp(obs, -params_.clip_obs, params_.clip_obs);
return clamped_obs;
}
void StateRL::loadYaml(const std::string &config_path) {
YAML::Node config;
try {
config = YAML::LoadFile(config_path + "/config.yaml");
} catch ([[maybe_unused]] YAML::BadFile &e) {
RCLCPP_ERROR(rclcpp::get_logger("StateRL"), "Failed to load '%s/config.yaml'", config_path.c_str());
return;
}
params_.model_name = config["model_name"].as<std::string>();
params_.framework = config["framework"].as<std::string>();
const int rows = config["rows"].as<int>();
const int cols = config["cols"].as<int>();
params_.use_history = config["use_history"].as<bool>();
params_.dt = config["dt"].as<double>();
params_.decimation = config["decimation"].as<int>();
params_.num_observations = config["num_observations"].as<int>();
params_.observations = ReadVectorFromYaml<std::string>(config["observations"]);
params_.clip_obs = config["clip_obs"].as<double>();
if (config["clip_actions_lower"].IsNull() && config["clip_actions_upper"].IsNull()) {
params_.clip_actions_upper = torch::tensor({}).view({1, -1});
params_.clip_actions_lower = torch::tensor({}).view({1, -1});
} else {
params_.clip_actions_upper = torch::tensor(
ReadVectorFromYaml<double>(config["clip_actions_upper"], params_.framework, rows, cols)).view({1, -1});
params_.clip_actions_lower = torch::tensor(
ReadVectorFromYaml<double>(config["clip_actions_lower"], params_.framework, rows, cols)).view({1, -1});
}
params_.action_scale = config["action_scale"].as<double>();
params_.hip_scale_reduction = config["hip_scale_reduction"].as<double>();
params_.hip_scale_reduction_indices = ReadVectorFromYaml<int>(config["hip_scale_reduction_indices"]);
params_.num_of_dofs = config["num_of_dofs"].as<int>();
params_.lin_vel_scale = config["lin_vel_scale"].as<double>();
params_.ang_vel_scale = config["ang_vel_scale"].as<double>();
params_.dof_pos_scale = config["dof_pos_scale"].as<double>();
params_.dof_vel_scale = config["dof_vel_scale"].as<double>();
// params_.commands_scale = torch::tensor(ReadVectorFromYaml<double>(config["commands_scale"])).view({1, -1});
params_.commands_scale = torch::tensor({params_.lin_vel_scale, params_.lin_vel_scale, params_.ang_vel_scale});
params_.rl_kp = torch::tensor(ReadVectorFromYaml<double>(config["rl_kp"], params_.framework, rows, cols)).view({
1, -1
});
params_.rl_kd = torch::tensor(ReadVectorFromYaml<double>(config["rl_kd"], params_.framework, rows, cols)).view({
1, -1
});
params_.fixed_kp = torch::tensor(ReadVectorFromYaml<double>(config["fixed_kp"], params_.framework, rows, cols)).
view({1, -1});
params_.fixed_kd = torch::tensor(ReadVectorFromYaml<double>(config["fixed_kd"], params_.framework, rows, cols)).
view({1, -1});
params_.torque_limits = torch::tensor(
ReadVectorFromYaml<double>(config["torque_limits"], params_.framework, rows, cols)).view({1, -1});
params_.default_dof_pos = torch::tensor(
ReadVectorFromYaml<double>(config["default_dof_pos"], params_.framework, rows, cols)).view({1, -1});
params_.joint_controller_names = ReadVectorFromYaml<std::string>(config["joint_controller_names"],
params_.framework, rows, cols);
}
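// Rotates v by the inverse of quaternion q (batched): v' = v * (2 * q_w^2 - 1) - 2 * q_w * (q_vec x v) + 2 * q_vec * (q_vec . v).
// The framework string only selects the quaternion layout: "isaacsim" stores (w, x, y, z), "isaacgym" stores (x, y, z, w).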
torch::Tensor StateRL::quatRotateInverse(torch::Tensor q, torch::Tensor v, const std::string &framework) {
torch::Tensor q_w;
torch::Tensor q_vec;
if (framework == "isaacsim") {
q_w = q.index({torch::indexing::Slice(), 0});
q_vec = q.index({torch::indexing::Slice(), torch::indexing::Slice(1, 4)});
} else if (framework == "isaacgym") {
q_w = q.index({torch::indexing::Slice(), 3});
q_vec = q.index({torch::indexing::Slice(), torch::indexing::Slice(0, 3)});
}
c10::IntArrayRef shape = q.sizes();
const torch::Tensor a = v * (2.0 * torch::pow(q_w, 2) - 1.0).unsqueeze(-1);
const torch::Tensor b = torch::cross(q_vec, v, -1) * q_w.unsqueeze(-1) * 2.0;
const torch::Tensor c = q_vec * torch::bmm(q_vec.view({shape[0], 1, 3}), v.view({shape[0], 3, 1})).squeeze(-1) * 2.0;
return a - b + c;
}
torch::Tensor StateRL::forward() {
torch::autograd::GradMode::set_enabled(false);
torch::Tensor clamped_obs = computeObservation();
torch::Tensor actions;
if (use_history_) {
if (params_.use_history) {
history_obs_buf_->insert(clamped_obs);
history_obs_ = history_obs_buf_->getObsVec({0, 1, 2, 3, 4, 5});
actions = model.forward({history_obs_}).toTensor();
@@ -49,8 +191,8 @@ torch::Tensor StateRL::forward() {
actions = model.forward({clamped_obs}).toTensor();
}
if (clip_actions_upper_.numel() != 0 && clip_actions_lower_.numel() != 0) {
return clamp(actions, clip_actions_lower_, clip_actions_upper_);
if (params_.clip_actions_upper.numel() != 0 && params_.clip_actions_lower.numel() != 0) {
return clamp(actions, params_.clip_actions_lower, params_.clip_actions_upper);
}
return actions;
}

View File

@@ -75,6 +75,10 @@ namespace legged_gym_controller {
// imu sensor
imu_name_ = auto_declare<std::string>("imu_name", imu_name_);
imu_interface_types_ = auto_declare<std::vector<std::string> >("imu_interfaces", state_interface_types_);
// rl config folder
rl_config_folder_ = auto_declare<std::string>("config_folder", rl_config_folder_);
} catch (const std::exception &e) {
fprintf(stderr, "Exception thrown during init stage with message: %s \n", e.what());
return controller_interface::CallbackReturn::ERROR;
@@ -131,6 +135,7 @@ namespace legged_gym_controller {
state_list_.passive = std::make_shared<StatePassive>(ctrl_comp_);
state_list_.fixedDown = std::make_shared<StateFixedDown>(ctrl_comp_);
state_list_.fixedStand = std::make_shared<StateFixedStand>(ctrl_comp_);
state_list_.rl = std::make_shared<StateRL>(ctrl_comp_, rl_config_folder_);
// Initialize FSM
current_state_ = state_list_.passive;
@@ -162,7 +167,7 @@ namespace legged_gym_controller {
return ControllerInterface::on_error(previous_state);
}
std::shared_ptr<FSMState> LeggedGymController::getNextState(FSMStateName stateName) const {
std::shared_ptr<FSMState> LeggedGymController::getNextState(const FSMStateName stateName) const {
switch (stateName) {
case FSMStateName::INVALID:
return state_list_.invalid;
@@ -172,6 +177,8 @@ namespace legged_gym_controller {
return state_list_.fixedDown;
case FSMStateName::FIXEDSTAND:
return state_list_.fixedStand;
case FSMStateName::RL:
return state_list_.rl;
default:
return state_list_.invalid;
}

View File

@@ -5,6 +5,8 @@
#ifndef LEGGEDGYMCONTROLLER_H
#define LEGGEDGYMCONTROLLER_H
#include <controller_interface/controller_interface.hpp>
#include <legged_gym_controller/FSM/StateRL.h>
#include "legged_gym_controller/FSM/StateFixedStand.h"
#include "legged_gym_controller/FSM/StateFixedDown.h"
#include "legged_gym_controller/FSM/StatePassive.h"
@@ -16,6 +18,7 @@ namespace legged_gym_controller {
std::shared_ptr<StatePassive> passive;
std::shared_ptr<StateFixedDown> fixedDown;
std::shared_ptr<StateFixedStand> fixedStand;
std::shared_ptr<StateRL> rl;
};
class LeggedGymController final : public controller_interface::ControllerInterface {
@@ -76,6 +79,8 @@ namespace legged_gym_controller {
std::string foot_force_name_;
std::vector<std::string> foot_force_interface_types_;
std::string rl_config_folder_;
std::unordered_map<
std::string, std::vector<std::reference_wrapper<hardware_interface::LoanedCommandInterface> > *>
command_interface_map_ = {

View File

@@ -0,0 +1,55 @@
model_name: "model_0702.pt"
framework: "isaacgym"
rows: 4
cols: 3
use_history: True
dt: 0.005
decimation: 4
num_observations: 45
observations: ["ang_vel", "gravity_vec", "commands", "dof_pos", "dof_vel", "actions"]
clip_obs: 100.0
clip_actions_lower: [-100, -100, -100,
-100, -100, -100,
-100, -100, -100,
-100, -100, -100]
clip_actions_upper: [100, 100, 100,
100, 100, 100,
100, 100, 100,
100, 100, 100]
rl_kp: [20, 20, 20,
20, 20, 20,
20, 20, 20,
20, 20, 20]
rl_kd: [0.5, 0.5, 0.5,
0.5, 0.5, 0.5,
0.5, 0.5, 0.5,
0.5, 0.5, 0.5]
fixed_kp: [80, 80, 80,
80, 80, 80,
80, 80, 80,
80, 80, 80]
fixed_kd: [3, 3, 3,
3, 3, 3,
3, 3, 3,
3, 3, 3]
hip_scale_reduction: 0.5
hip_scale_reduction_indices: [0, 3, 6, 9]
num_of_dofs: 12
action_scale: 0.25
lin_vel_scale: 2.0
ang_vel_scale: 0.25
dof_pos_scale: 1.0
dof_vel_scale: 0.05
commands_scale: [2.0, 2.0, 1.0]
torque_limits: [33.5, 33.5, 33.5,
33.5, 33.5, 33.5,
33.5, 33.5, 33.5,
33.5, 33.5, 33.5]
default_dof_pos: [ 0.1000, 0.8000, -1.5000,
-0.1000, 0.8000, -1.5000,
0.1000, 1.0000, -1.5000,
-0.1000, 1.0000, -1.5000]
joint_controller_names: ["FL_hip_controller", "FL_thigh_controller", "FL_calf_controller",
"FR_hip_controller", "FR_thigh_controller", "FR_calf_controller",
"RL_hip_controller", "RL_thigh_controller", "RL_calf_controller",
"RR_hip_controller", "RR_thigh_controller", "RR_calf_controller"]

View File

@@ -47,7 +47,11 @@ def launch_setup(context, *args, **kwargs):
controller_manager = Node(
package="controller_manager",
executable="ros2_control_node",
parameters=[robot_controllers],
parameters=[robot_controllers,
{
'config_folder': os.path.join(get_package_share_directory(package_description), 'config',
'issacgym'),
}],
output="both",
)