mirror of https://github.com/fan-ziqi/rl_sar.git
feat: Handle situations that do not require clamp
commit db09012cd5 (parent 4868c7d065)
@@ -397,8 +397,16 @@ void RL::ReadYaml(std::string robot_name)
     this->params.num_observations = config["num_observations"].as<int>();
     this->params.observations = ReadVectorFromYaml<std::string>(config["observations"]);
     this->params.clip_obs = config["clip_obs"].as<double>();
-    this->params.clip_actions_upper = torch::tensor(ReadVectorFromYaml<double>(config["clip_actions_upper"], this->params.framework, rows, cols)).view({1, -1});
-    this->params.clip_actions_lower = torch::tensor(ReadVectorFromYaml<double>(config["clip_actions_lower"], this->params.framework, rows, cols)).view({1, -1});
+    if(config["clip_actions_lower"] && config["clip_actions_upper"])
+    {
+        this->params.clip_actions_upper = torch::tensor(ReadVectorFromYaml<double>(config["clip_actions_upper"], this->params.framework, rows, cols)).view({1, -1});
+        this->params.clip_actions_lower = torch::tensor(ReadVectorFromYaml<double>(config["clip_actions_lower"], this->params.framework, rows, cols)).view({1, -1});
+    }
+    else
+    {
+        this->params.clip_actions_upper = torch::tensor({}).view({1, -1});
+        this->params.clip_actions_lower = torch::tensor({}).view({1, -1});
+    }
     this->params.action_scale = config["action_scale"].as<double>();
     this->params.hip_scale_reduction = config["hip_scale_reduction"].as<double>();
     this->params.hip_scale_reduction_indices = ReadVectorFromYaml<int>(config["hip_scale_reduction_indices"]);
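With this change the C++ loader only builds the clip tensors when both keys exist in the robot's config.yaml, and otherwise falls back to empty tensors. A minimal sketch of the same presence check in Python; the inline YAML values and the load_clip_bounds helper are invented for illustration, not taken from the repo:

import yaml
import torch

with_clip = yaml.safe_load("""
clip_actions_lower: [-0.8, -1.0, -2.7]
clip_actions_upper: [0.8, 4.5, -0.9]
""")
without_clip = yaml.safe_load("""
action_scale: 0.25
""")

def load_clip_bounds(config):
    # Mirror of the commit's logic: build the (1, N) bound tensors only when
    # both keys are present, otherwise fall back to empty sentinel tensors.
    if "clip_actions_lower" in config and "clip_actions_upper" in config:
        lower = torch.tensor(config["clip_actions_lower"]).view(1, -1)
        upper = torch.tensor(config["clip_actions_upper"]).view(1, -1)
    else:
        lower = torch.tensor([]).view(1, -1)
        upper = torch.tensor([]).view(1, -1)
    return lower, upper

print(load_clip_bounds(with_clip))     # two (1, 3) tensors
print(load_clip_bounds(without_clip))  # two empty (1, 0) tensors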
@@ -387,8 +387,12 @@ class RL:
         self.params.action_scale = config["action_scale"]
         self.params.hip_scale_reduction = config["hip_scale_reduction"]
         self.params.hip_scale_reduction_indices = config["hip_scale_reduction_indices"]
-        self.params.clip_actions_upper = torch.tensor(self.ReadVectorFromYaml(config["clip_actions_upper"], self.params.framework, rows, cols)).view(1, -1)
-        self.params.clip_actions_lower = torch.tensor(self.ReadVectorFromYaml(config["clip_actions_lower"], self.params.framework, rows, cols)).view(1, -1)
+        if config["clip_actions_lower"] and config["clip_actions_upper"]:
+            self.params.clip_actions_upper = torch.tensor(self.ReadVectorFromYaml(config["clip_actions_upper"], self.params.framework, rows, cols)).view(1, -1)
+            self.params.clip_actions_lower = torch.tensor(self.ReadVectorFromYaml(config["clip_actions_lower"], self.params.framework, rows, cols)).view(1, -1)
+        else:
+            self.params.clip_actions_upper = None
+            self.params.clip_actions_lower = None
         self.params.num_of_dofs = config["num_of_dofs"]
         self.params.lin_vel_scale = config["lin_vel_scale"]
         self.params.ang_vel_scale = config["ang_vel_scale"]
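Note that clip_obs stays a single scalar in both loaders, while clip_actions_lower/upper become (1, N) tensors, so observations are clipped with one symmetric bound and actions are clipped per joint. A small sketch of that difference; the bound values below are made up for illustration:

import torch

obs = torch.tensor([[0.3, -7.2, 12.0]])
clip_obs = 5.0                                            # scalar bound, same for every entry
clamped_obs = torch.clamp(obs, -clip_obs, clip_obs)

actions = torch.tensor([[1.2, -3.0, 0.1]])
clip_actions_lower = torch.tensor([[-0.8, -1.0, -2.7]])   # one bound per joint
clip_actions_upper = torch.tensor([[0.8, 4.5, 2.7]])
clamped_actions = torch.clamp(actions, clip_actions_lower, clip_actions_upper)

print(clamped_obs)      # tensor([[ 0.3000, -5.0000,  5.0000]])
print(clamped_actions)  # tensor([[ 0.8000, -1.0000,  0.1000]])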
@@ -208,8 +208,10 @@ class RL_Sim(RL):
             actions = self.model.forward(history_obs)
         else:
             actions = self.model.forward(clamped_obs)
-        clamped_actions = torch.clamp(actions, self.params.clip_actions_lower, self.params.clip_actions_upper)
-        return clamped_actions
+        if self.params.clip_actions_lower is not None and self.params.clip_actions_upper is not None:
+            return torch.clamp(actions, self.params.clip_actions_lower, self.params.clip_actions_upper)
+        else:
+            return actions
 
     def ThreadControl(self):
         thread_period = self.params.dt
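With the None fallback in place, the policy output is only clamped when both bounds were configured. A standalone sketch of that guard, using a made-up action tensor and a hypothetical helper name in place of the model call:

import torch

def clip_actions_if_configured(actions, lower, upper):
    # Mirrors the changed Forward(): clamp only when both bounds exist,
    # otherwise pass the raw network output through untouched.
    if lower is not None and upper is not None:
        return torch.clamp(actions, lower, upper)
    return actions

actions = torch.tensor([[1.5, -2.0]])
print(clip_actions_if_configured(actions, None, None))        # unchanged
print(clip_actions_if_configured(actions,
                                 torch.tensor([[-1.0, -1.0]]),
                                 torch.tensor([[1.0, 1.0]])))  # clamped to [[1.0, -1.0]]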
@@ -181,9 +181,14 @@ torch::Tensor RL_Real::Forward()
 
     torch::Tensor actions = this->model.forward({this->history_obs}).toTensor();
 
-    torch::Tensor clamped_actions = torch::clamp(actions, this->params.clip_actions_lower, this->params.clip_actions_upper);
-    return clamped_actions;
+    if(this->params.clip_actions_upper.numel() != 0 && this->params.clip_actions_lower.numel() != 0)
+    {
+        return torch::clamp(actions, this->params.clip_actions_lower, this->params.clip_actions_upper);
+    }
+    else
+    {
+        return actions;
+    }
 }
 
 void RL_Real::Plot()
 
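On the C++ side the sentinel is an empty tensor rather than None, so the guard checks numel(). The same distinction is easy to see from Python's torch API; a short sketch with invented bound values:

import torch

# Empty-tensor sentinel, matching torch::tensor({}).view({1, -1}) in the C++
# branch: zero elements means "no clip_actions_* entries were found in the
# YAML config", so numel() separates the two cases without an extra flag.
no_bounds = torch.tensor([]).view(1, -1)
real_bounds = torch.tensor([[-1.0, 1.0, 2.7]])

print(no_bounds.shape, no_bounds.numel())      # torch.Size([1, 0]) 0
print(real_bounds.shape, real_bounds.numel())  # torch.Size([1, 3]) 3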
@@ -259,8 +259,14 @@ torch::Tensor RL_Sim::Forward()
         actions = this->model.forward({clamped_obs}).toTensor();
     }
 
-    torch::Tensor clamped_actions = torch::clamp(actions, this->params.clip_actions_lower, this->params.clip_actions_upper);
-    return clamped_actions;
+    if(this->params.clip_actions_upper.numel() != 0 && this->params.clip_actions_lower.numel() != 0)
+    {
+        return torch::clamp(actions, this->params.clip_actions_lower, this->params.clip_actions_upper);
+    }
+    else
+    {
+        return actions;
+    }
 }
 
 void RL_Sim::Plot()
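Taken together, the forward path clamps observations with the scalar clip_obs bound, runs the policy, and clamps the resulting actions only if per-joint bounds were configured. A toy end-to-end sketch under those assumptions; the Linear layer and the 4-observation / 2-action sizes are stand-ins for the exported TorchScript policy, not the repo's real model:

import torch

policy = torch.nn.Linear(4, 2)  # stand-in policy so the sketch runs on its own

def forward(obs, clip_obs, clip_actions_lower, clip_actions_upper):
    # Observations are always clipped with the scalar clip_obs bound.
    clamped_obs = torch.clamp(obs, -clip_obs, clip_obs)
    actions = policy(clamped_obs)
    # Actions are only clipped when per-joint bounds were configured.
    if clip_actions_lower is not None and clip_actions_upper is not None:
        return torch.clamp(actions, clip_actions_lower, clip_actions_upper)
    return actions

obs = torch.randn(1, 4)
print(forward(obs, 100.0, None, None))                                         # raw actions
print(forward(obs, 100.0, torch.full((1, 2), -0.5), torch.full((1, 2), 0.5)))  # clamped to +/-0.5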