From e63005800cdfa0014111fb05918482a2ffe2f456 Mon Sep 17 00:00:00 2001
From: Netceor <45135347+Netceor@users.noreply.github.com>
Date: Wed, 15 Nov 2023 15:09:08 +0800
Subject: [PATCH 1/5] Update AEM
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 robowaiter/scene/scene.py | 75 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)

diff --git a/robowaiter/scene/scene.py b/robowaiter/scene/scene.py
index 2b603f2..e616ea1 100644
--- a/robowaiter/scene/scene.py
+++ b/robowaiter/scene/scene.py
@@ -100,6 +100,11 @@ class Scene:
 
         # Air-conditioner panel position
         self.obj_loc = [300.5, -140.0,114]
 
+        # AEM (active exploration / semantic mapping) state
+        self.visited = set()
+        self.all_frontier_list = set()
+        self.semantic_map = semantic_map
+
     def reset(self):
         # Base-class reset; by default runs the simulator initialization
@@ -553,4 +558,74 @@ class Scene:
         scene = stub.Do(action)
         print(scene.info)
 
+    def navigation_move(self, cur_objs, objs_name_set, v_list, scene_id=0, map_id=11):
+        print('------------------navigation_move----------------------')
+        scene = stub.Observe(GrabSim_pb2.SceneID(value=scene_id))
+        walk_value = [scene.location.X, scene.location.Y, scene.rotation.Yaw]
+        print("position:", walk_value)
+
+        # if map_id == 11:  # coffee
+        #     v_list = [[0, 880], [250, 1200], [-55, 750], [70, -200]]
+        # else:
+        #     v_list = [[0.0, 0.0]]
+
+        for walk_v in v_list:
+            walk_v = walk_v + [scene.rotation.Yaw - 90, 250, 60]
+            print("walk_v", walk_v)
+            action = GrabSim_pb2.Action(scene=scene_id, action=GrabSim_pb2.Action.ActionType.WalkTo, values=walk_v)
+            scene = stub.Do(action)
+            cur_objs, objs_name_set = camera.get_semantic_map(GrabSim_pb2.CameraName.Head_Segment, cur_objs, objs_name_set)
+            # if scene.info == "Unreachable":
+            print(scene.info)
+        return cur_objs, objs_name_set
+
+    def isOutMap(self, pos, min_x=-350, max_x=600, min_y=-400, max_y=1450):
+        if pos[0] <= min_x or pos[0] >= max_x or pos[1] <= min_y or pos[1] >= max_y:
+            return True
+        return False
+
+    def real2map(self, x, y):
+        '''
+        Real-world coordinates -> map-grid coordinates (floored)
+        '''
+        # x = round((x - self.min_x) / self.scale_ratio)
+        # y = round((y - self.min_y) / self.scale_ratio)
+        x = math.floor((x + 350) / 5)
+        y = math.floor((y + 400) / 5)
+        return x, y
+
+    def explore(self, map, cur_pos, explore_range):
+        for i in range(cur_pos[0] - explore_range, cur_pos[0] + explore_range + 1):
+            for j in range(cur_pos[1] - explore_range, cur_pos[1] + explore_range + 1):
+                if self.isOutMap((i, j)):
+                    continue
+                x, y = self.real2map(i, j)
+                if map[x, y] == 0:
+                    self.visited.add((i, j))
+        for i in range(cur_pos[0] - explore_range, cur_pos[0] + explore_range + 1):
+            for j in range(cur_pos[1] - explore_range, cur_pos[1] + explore_range + 1):
+                if self.isOutMap((i, j)):
+                    continue
+                x, y = self.real2map(i, j)
+                if map[x, y] == 0:
+                    if self.isNewFrontier((i, j), map):
+                        self.all_frontier_list.add((i, j))
+        if len(self.all_frontier_list) <= 400:
+            free_list = list(self.visited)
+            free_array = np.array(free_list)
+            print(f"Exploration finished! Reachable points in the scene: {free_array}; all remaining points are obstacles and unreachable")
+            return None
+        return self.getNearestFrontier(cur_pos, self.all_frontier_list)
+
+    def isNewFrontier(self, pos, map):
+        around_nodes = [(pos[0], pos[1] + 1), (pos[0], pos[1] - 1), (pos[0] - 1, pos[1]), (pos[0] + 1, pos[1])]
+
+        for node in around_nodes:
+            x, y = self.real2map(node[0], node[1])
+            if node not in self.visited and map[x, y] == 0:
+                return True
+        if (pos[0], pos[1]) in self.all_frontier_list:
+            self.all_frontier_list.remove((pos[0], pos[1]))
+        return False
+
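Note: explore() above hands off to self.getNearestFrontier(cur_pos, self.all_frontier_list), but no patch in this series defines that helper. A minimal sketch of what it could look like, assuming a plain Euclidean metric over the same (x, y) tuples that explore() stores; the mixin and getDistance names are illustrative, not taken from the repository:

    import math

    class FrontierSelectionMixin:
        # Hypothetical helpers that explore() in PATCH 1 expects to find on Scene.
        def getDistance(self, pos1, pos2):
            # Straight-line distance between two (x, y) world points (assumed metric).
            return math.hypot(pos1[0] - pos2[0], pos1[1] - pos2[1])

        def getNearestFrontier(self, cur_pos, frontier_list):
            # Return the frontier closest to the robot; the exploration loop would
            # walk there next and call explore() again from that position.
            return min(frontier_list, key=lambda f: self.getDistance(cur_pos, f))
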
From b360cff1a6f59a58bd1f394ac9722a723d7cd1c3 Mon Sep 17 00:00:00 2001
From: Netceor <45135347+Netceor@users.noreply.github.com>
Date: Wed, 15 Nov 2023 15:09:51 +0800
Subject: [PATCH 2/5] Update AEM
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 robowaiter/scene/tasks/AEM.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/robowaiter/scene/tasks/AEM.py b/robowaiter/scene/tasks/AEM.py
index 2ac5d4b..8c3a899 100644
--- a/robowaiter/scene/tasks/AEM.py
+++ b/robowaiter/scene/tasks/AEM.py
@@ -12,7 +12,11 @@ class SceneAEM(Scene):
     def _reset(self):
         pass
     def _run(self):
-
+        cur_objs = []
+        print('------------ Autonomous exploration ------------')
+        cur_objs = self.semantic_map.navigation_move(cur_objs, 0, 11)
+        print("Object list:")
+        print(cur_objs)
         pass
 
 
@@ -26,4 +30,4 @@ if __name__ == '__main__':
     # create task
     task = SceneAEM(robot)
     task.reset()
-    task.run()
\ No newline at end of file
+    task.run()
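Note: _run() above only triggers the fixed waypoint sweep in semantic_map.navigation_move; the frontier helpers added to Scene in PATCH 1 are not wired in yet. A rough sketch of how a frontier-driven loop could look here instead, assuming an occupancy array self.occupancy_map (0 = free, as explore() expects), a self.walk_to(x, y) helper wrapping the WalkTo action, a self.get_location() helper returning the robot's (x, y), and the same camera / GrabSim_pb2 imports that scene.py uses; all of these are assumptions, not part of these patches:

    def _run(self):
        cur_objs = []
        objs_name_set = set()
        cur_pos = self.get_location()                      # assumed helper
        goal = self.explore(self.occupancy_map, cur_pos, explore_range=120)
        while goal is not None:
            self.walk_to(goal[0], goal[1])                 # assumed helper
            cur_objs, objs_name_set = camera.get_semantic_map(
                GrabSim_pb2.CameraName.Head_Segment, cur_objs, objs_name_set)
            cur_pos = goal
            goal = self.explore(self.occupancy_map, cur_pos, explore_range=120)
        print("Object list:")
        print(cur_objs)
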
From b07e0dbd3f28369baf82975c4d0686403df5a30e Mon Sep 17 00:00:00 2001
From: Netceor <45135347+Netceor@users.noreply.github.com>
Date: Wed, 15 Nov 2023 15:10:27 +0800
Subject: [PATCH 3/5] Update scene.py

---
 robowaiter/scene/scene.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/robowaiter/scene/scene.py b/robowaiter/scene/scene.py
index e616ea1..df0ece8 100644
--- a/robowaiter/scene/scene.py
+++ b/robowaiter/scene/scene.py
@@ -2,11 +2,15 @@ import time
 import grpc
 import numpy as np
 import matplotlib.pyplot as plt
-
+from robowaiter.proto import camera
+from robowaiter.proto import semantic_map
+import math
 from robowaiter.proto import GrabSim_pb2
 from robowaiter.proto import GrabSim_pb2_grpc
 
 
+
+
 channel = grpc.insecure_channel(
     "localhost:30001",
     options=[
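Note: the math import supports Scene.real2map from PATCH 1, which bakes in a fixed grid: x in [-350, 600], y in [-400, 1450], one cell per 5 world units, and 0 meaning free space (that is how explore() indexes map[x, y]). A small sketch of an occupancy array consistent with that convention; the array name and the obstacle example are illustrative only:

    import math
    import numpy as np

    MIN_X, MAX_X = -350, 600
    MIN_Y, MAX_Y = -400, 1450
    SCALE = 5  # world units per grid cell

    def real2map(x, y):
        # Same floor-based conversion as Scene.real2map in PATCH 1.
        return math.floor((x - MIN_X) / SCALE), math.floor((y - MIN_Y) / SCALE)

    # A 190 x 370 grid covering the map bounds; 0 = free, 1 = obstacle.
    occupancy_map = np.zeros(((MAX_X - MIN_X) // SCALE, (MAX_Y - MIN_Y) // SCALE), dtype=np.uint8)

    # Example: mark the air-conditioner panel position from scene.py as occupied.
    gx, gy = real2map(300.5, -140.0)
    occupancy_map[gx, gy] = 1
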
From 1aa73700ec67a79b9bfcb4fe3c003f6bf2a2815d Mon Sep 17 00:00:00 2001
From: Netceor <45135347+Netceor@users.noreply.github.com>
Date: Wed, 15 Nov 2023 15:11:01 +0800
Subject: [PATCH 4/5] Add files via upload

---
 robowaiter/proto/camera.py       | 281 +++++++++++++++++++++++++++++++
 robowaiter/proto/semantic_map.py | 147 ++++++++++++++++
 2 files changed, 428 insertions(+)
 create mode 100644 robowaiter/proto/camera.py
 create mode 100644 robowaiter/proto/semantic_map.py

diff --git a/robowaiter/proto/camera.py b/robowaiter/proto/camera.py
new file mode 100644
index 0000000..503e735
--- /dev/null
+++ b/robowaiter/proto/camera.py
@@ -0,0 +1,281 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# encoding = utf8
+import json
+import string
+import sys
+import time
+import grpc
+
+sys.path.append('./')
+sys.path.append('../')
+
+import matplotlib.pyplot as plt
+import numpy as np
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+
+import GrabSim_pb2_grpc
+import GrabSim_pb2
+
+channel = grpc.insecure_channel('localhost:30001', options=[
+    ('grpc.max_send_message_length', 1024 * 1024 * 1024),
+    ('grpc.max_receive_message_length', 1024 * 1024 * 1024)
+])
+
+sim_client = GrabSim_pb2_grpc.GrabSimStub(channel)
+objects_dic = {}
+
+'''
+Initialize: unload any loaded level and remove all robots
+'''
+
+
+def Init():
+    sim_client.Init(GrabSim_pb2.NUL())
+
+
+'''
+Get the maps that can currently be loaded (map names and map sizes)
+'''
+
+
+def AcquireAvailableMaps():
+    AvailableMaps = sim_client.AcquireAvailableMaps(GrabSim_pb2.NUL())
+    print(AvailableMaps)
+
+
+'''
+1. Load the map specified by mapID
+2. If scene_num > 1, load multiple copies of the same map, offset by the map size
+3. This makes it possible to train several scenes within one level
+'''
+
+
+def SetWorld(map_id=0, scene_num=1):
+    print('------------------SetWorld----------------------')
+    world = sim_client.SetWorld(GrabSim_pb2.BatchMap(count=scene_num, mapID=map_id))
+
+
+'''
+Return the scene state:
+1. robot position and rotation
+2. name and rotation of every joint
+3. tagged objects in the scene (name, type, location, rotation)
+4. pedestrians in the scene (name, location, rotation, speed)
+5. collision info for the robot's fingers and arms
+'''
+
+
+def Observe(scene_id=0):
+    print('------------------show_env_info----------------------')
+    scene = sim_client.Observe(GrabSim_pb2.SceneID(value=scene_id))
+    # print(
+    #     f"location:{[scene.location]}, rotation:{scene.rotation}\n",
+    #     f"joints number:{len(scene.joints)}, fingers number:{len(scene.fingers)}\n",
+    #     f"objects number: {len(scene.objects)}, walkers number: {len(scene.walkers)}\n"
+    #     f"timestep:{scene.timestep}, timestamp:{scene.timestamp}\n"
+    #     f"collision:{scene.collision}, info:{scene.info}")
+    return scene
+
+
+'''
+Reset the scene:
+1. reset table width and height
+2. remove spawned pedestrians and objects
+3. reset joint angles, position and rotation
+4. clear collision info
+5. reset the tagged objects in the scene
+'''
+
+
+def Reset(scene_id=0):
+    print('------------------Reset----------------------')
+    scene = sim_client.Reset(GrabSim_pb2.ResetParams(scene=scene_id))
+    print(scene)
+
+    # If the scene supports adjusting the table:
+    # sim_client.Reset(GrabSim_pb2.ResetParams(scene = scene_id, adjust = True, height = 100.0, width = 100.0))
+
+
+'''
+Capture camera data for the given body-part camera names
+'''
+
+
+def get_camera(part, scene_id=0):
+    print('------------------get_camera----------------------')
+    action = GrabSim_pb2.CameraList(cameras=part, scene=scene_id)
+    return sim_client.Capture(action)
+
+
+'''
+Display the camera image
+'''
+
+
+def show_image(img_data, scene):
+    print('------------------show_image----------------------')
+    im = img_data.images[0]
+
+    # Camera intrinsic matrix
+    in_matrix = np.array(
+        [[im.parameters.fx, 0, im.parameters.cx], [0, im.parameters.fy, im.parameters.cy], [0, 0, 1]])
+
+    # Camera extrinsic matrix
+    out_matrix = np.array(im.parameters.matrix).reshape((4, 4))
+
+    # # Rotation matrix
+    # rotation_matrix = out_matrix[0:3, 0:3]
+    #
+    # # Translation matrix
+    # translation_matrix = out_matrix[0:3, -1].reshape(3, 1)
+
+    # Pixel coordinates
+    # pixel_point = np.array([403, 212, 1]).reshape(3, 1)
+    pixel_x = 404
+    pixel_y = 212
+    depth = 369
+
+    # Convert pixel coordinates to normalized device coordinates
+    normalized_x = (pixel_x - im.parameters.cx) / im.parameters.fx
+    normalized_y = (pixel_y - im.parameters.cy) / im.parameters.fy
+
+    # Convert normalized coordinates plus depth into camera coordinates
+    camera_x = normalized_x * depth
+    camera_y = normalized_y * depth
+    camera_z = depth
+
+    # Build the homogeneous camera-coordinate vector
+    camera_coordinates = np.array([camera_x, camera_y, camera_z, 1])
+    # print("Homogeneous coordinates relative to the camera: ", camera_coordinates)
+
+    # Transform camera coordinates into robot-base coordinates
+    robot_coordinates = np.dot(out_matrix, camera_coordinates)[:3]
+    # print("Coordinates relative to the robot base:", robot_coordinates)
+
+    # Convert the base-relative coordinates to homogeneous form
+    robot_homogeneous_coordinates = np.array([robot_coordinates[0], -robot_coordinates[1], robot_coordinates[2], 1])
+    # print("Homogeneous coordinates relative to the robot base:", robot_homogeneous_coordinates)
+
+    # Robot position
+    X = scene.location.X
+    Y = scene.location.Y
+    Z = 0.0
+
+    # Robot rotation
+    Roll = 0.0
+    Pitch = 0.0
+    Yaw = scene.rotation.Yaw
+
+    # Build the translation matrix
+    T = np.array([[1, 0, 0, X],
+                  [0, 1, 0, Y],
+                  [0, 0, 1, Z],
+                  [0, 0, 0, 1]])
+
+    # Build the rotation matrices
+    Rx = np.array([[1, 0, 0, 0],
+                   [0, np.cos(Roll), -np.sin(Roll), 0],
+                   [0, np.sin(Roll), np.cos(Roll), 0],
+                   [0, 0, 0, 1]])
+
+    Ry = np.array([[np.cos(Pitch), 0, np.sin(Pitch), 0],
+                   [0, 1, 0, 0],
+                   [-np.sin(Pitch), 0, np.cos(Pitch), 0],
+                   [0, 0, 0, 1]])
+
+    Rz = np.array([[np.cos(np.radians(Yaw)), -np.sin(np.radians(Yaw)), 0, 0],
+                   [np.sin(np.radians(Yaw)), np.cos(np.radians(Yaw)), 0, 0],
+                   [0, 0, 1, 0],
+                   [0, 0, 0, 1]])
+
+    R = np.dot(Rz, np.dot(Ry, Rx))
+
+    # Build the robot's world transform
+    T_robot = np.dot(T, R)
+    # print(T_robot)
+
+    # Transform the object from the robot-base frame into the world frame
+    world_coordinates = np.dot(T_robot, robot_homogeneous_coordinates)[:3]
+    # print("World coordinates of the object:", world_coordinates)
+
+    # World coordinates after the map offset
+    world_offest_coordinates = [world_coordinates[0] + 700, world_coordinates[1] + 1400, world_coordinates[2]]
+    # print("Offset world coordinates of the object: ", world_offest_coordinates)
+    # world_point = world_coordinates + np.array([])
+    # print("World coordinates of the object:", )
+
+    # # World coordinates relative to the robot
+    # world_point = rotation_matrix.T @ (in_matrix.T * 369 @ pixel_point - translation_matrix)
+
+    # print(world_point)
+
+    # print(in_matrix @ out_matrix @ obj_world)
+    #
+    d = np.frombuffer(im.data, dtype=im.dtype).reshape((im.height, im.width, im.channels))
+    plt.imshow(d, cmap="gray" if "depth" in im.name.lower() else None)
+    plt.show()
+
+
+def save_obj_info(img_data, objs_name):
+    items = img_data.info.split(";")
+    dictionary = {}
+    for item in items:
+        key, value = item.split(":")
+        dictionary[int(key)] = value
+
+    im = img_data.images[0]
+    d = np.frombuffer(im.data, dtype=im.dtype).reshape((im.height, im.width, im.channels))
+    arr_flat = d.ravel()
+    for id in arr_flat:
+        if id not in dictionary:
+            print(id)
+        else:
+            objs_name.add(dictionary[id])
+    return objs_name
+
+
+def get_semantic_map(camera, cur_objs, objs_name):
+    scene = Observe(0)
+    objs = scene.objects
+    img_data = get_camera([camera])
+    show_image(img_data, scene)
+    objs_name = save_obj_info(img_data, objs_name)
+    for obj_name in list(objs_name):
+        for obj in objs:
+            if obj.name == obj_name and obj not in cur_objs:
+                cur_objs.append(obj)
+                break
+    return cur_objs, objs_name
+
+
+if __name__ == '__main__':
+    map_id = 11  # map ID
+    scene_num = 1  # number of scenes
+    cur_objs = []
+
+    print('------------ Initialize and load the scene ------------')
+    Init()
+    AcquireAvailableMaps()
+    SetWorld(map_id, scene_num)
+    time.sleep(5.0)
+
+    for i in range(scene_num):
+        print('------------ Scene operations ------------')
+        scene = Observe(i)
+
+        Reset(i)
+
+        print('------------ Camera capture ------------')
+        Reset(i)
+        time.sleep(1.0)
+
+        # print(get_semantic_map(GrabSim_pb2.CameraName.Head_Segment, cur_objs))
+
+        for camera_name in [GrabSim_pb2.CameraName.Head_Depth]:
+            img_data = get_camera([camera_name], i)
+            show_image(img_data, scene)
+        # for camera_name in [GrabSim_pb2.CameraName.Waist_Color, GrabSim_pb2.CameraName.Waist_Depth]:
+        #     img_data = get_camera([camera_name], i)
+        #     show_image(img_data, 2)
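Note: show_image() above folds the whole pixel -> camera -> robot-base -> world chain into one function. The same math as a standalone helper, with Roll and Pitch fixed to 0 exactly as above; the helper name and the example intrinsics, depth and pose below are made-up values for illustration, not taken from the simulator:

    import numpy as np

    def pixel_to_world(px, py, depth, fx, fy, cx, cy, cam_to_base, base_xy, yaw_deg):
        # 1. Pixel + depth -> camera-frame coordinates (pinhole model).
        cam = np.array([(px - cx) / fx * depth, (py - cy) / fy * depth, depth, 1.0])
        # 2. Camera frame -> robot-base frame via the 4x4 extrinsic matrix.
        base = cam_to_base @ cam
        base = np.array([base[0], -base[1], base[2], 1.0])  # same Y flip as show_image()
        # 3. Robot-base frame -> world frame: rotate by yaw, then translate.
        yaw = np.radians(yaw_deg)
        T = np.array([[np.cos(yaw), -np.sin(yaw), 0, base_xy[0]],
                      [np.sin(yaw),  np.cos(yaw), 0, base_xy[1]],
                      [0, 0, 1, 0],
                      [0, 0, 0, 1]])
        return (T @ base)[:3]

    # Exercise the helper with placeholder numbers:
    print(pixel_to_world(404, 212, 369, fx=600, fy=600, cx=320, cy=240,
                         cam_to_base=np.eye(4), base_xy=(247.0, 520.0), yaw_deg=90.0))
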
diff --git a/robowaiter/proto/semantic_map.py b/robowaiter/proto/semantic_map.py
new file mode 100644
index 0000000..7b2ebf9
--- /dev/null
+++ b/robowaiter/proto/semantic_map.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+# encoding = utf8
+import sys
+import time
+import grpc
+
+# import camera
+from robowaiter.proto import camera
+
+sys.path.append('./')
+sys.path.append('../')
+
+import matplotlib.pyplot as plt
+import numpy as np
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+
+import GrabSim_pb2_grpc
+import GrabSim_pb2
+
+channel = grpc.insecure_channel('localhost:30001', options=[
+    ('grpc.max_send_message_length', 1024 * 1024 * 1024),
+    ('grpc.max_receive_message_length', 1024 * 1024 * 1024)
+])
+
+sim_client = GrabSim_pb2_grpc.GrabSimStub(channel)
+
+'''
+Initialize: unload any loaded level and remove all robots
+'''
+
+
+def Init():
+    sim_client.Init(GrabSim_pb2.NUL())
+
+
+'''
+Get the maps that can currently be loaded (map names and map sizes)
+'''
+
+
+def AcquireAvailableMaps():
+    AvailableMaps = sim_client.AcquireAvailableMaps(GrabSim_pb2.NUL())
+    print(AvailableMaps)
+
+
+'''
+1. Load the map specified by mapID
+2. If scene_num > 1, load multiple copies of the same map, offset by the map size
+3. This makes it possible to train several scenes within one level
+'''
+
+
+def SetWorld(map_id=0, scene_num=1):
+    print('------------------SetWorld----------------------')
+    world = sim_client.SetWorld(GrabSim_pb2.BatchMap(count=scene_num, mapID=map_id))
+
+
+'''
+Return the scene state:
+1. robot position and rotation
+2. name and rotation of every joint
+3. tagged objects in the scene (name, type, location, rotation)
+4. pedestrians in the scene (name, location, rotation, speed)
+5. collision info for the robot's fingers and arms
+'''
+
+
+def Observe(scene_id=0):
+    print('------------------show_env_info----------------------')
+    scene = sim_client.Observe(GrabSim_pb2.SceneID(value=scene_id))
+    print(
+        f"location:{[scene.location]}, rotation:{scene.rotation}\n",
+        f"joints number:{len(scene.joints)}, fingers number:{len(scene.fingers)}\n",
+        f"objects number: {len(scene.objects)}, walkers number: {len(scene.walkers)}\n"
+        f"timestep:{scene.timestep}, timestamp:{scene.timestamp}\n"
+        f"collision:{scene.collision}, info:{scene.info}")
+
+
+'''
+Reset the scene:
+1. reset table width and height
+2. remove spawned pedestrians and objects
+3. reset joint angles, position and rotation
+4. clear collision info
+5. reset the tagged objects in the scene
+'''
+
+
+def Reset(scene_id=0):
+    print('------------------Reset----------------------')
+    scene = sim_client.Reset(GrabSim_pb2.ResetParams(scene=scene_id))
+    print(scene)
+
+    # If the scene supports adjusting the table:
+    # sim_client.Reset(GrabSim_pb2.ResetParams(scene = scene_id, adjust = True, height = 100.0, width = 100.0))
+
+
+"""
+Navigation move
+yaw: robot heading;
+velocity: > 0 means walk, < 0 means teleport, = 0 means query only;
+dis: maximum allowed distance between the final position and the target; beyond this the target is unreachable
+"""
+
+
+def navigation_move(cur_objs, scene_id=0, map_id=0):
+    print('------------------navigation_move----------------------')
+    scene = sim_client.Observe(GrabSim_pb2.SceneID(value=scene_id))
+    walk_value = [scene.location.X, scene.location.Y, scene.rotation.Yaw]
+    print("position:", walk_value)
+    objs_name_set = set()
+
+    if map_id == 11:  # coffee
+        v_list = [[247, 520], [247, 700], [270, 1100], [55, 940], [30, 900], [30, 520], [160, -165], [247, 0], [247, 520]]
+    else:
+        v_list = [[0.0, 0.0]]
+
+    for walk_v in v_list:
+        walk_v = walk_v + [scene.rotation.Yaw - 90, 200, 10]
+        print("walk_v", walk_v)
+        action = GrabSim_pb2.Action(scene=scene_id, action=GrabSim_pb2.Action.ActionType.WalkTo, values=walk_v)
+        scene = sim_client.Do(action)
+        cur_objs, objs_name_set = camera.get_semantic_map(GrabSim_pb2.CameraName.Head_Segment, cur_objs, objs_name_set)
+        print(scene.info)
+    return cur_objs
+
+
+if __name__ == '__main__':
+    map_id = 11  # map ID
+    scene_num = 1  # number of scenes
+    cur_objs = []
+
+    print('------------ Initialize and load the scene ------------')
+    Init()
+    AcquireAvailableMaps()
+    SetWorld(map_id, scene_num)
+    time.sleep(5.0)
+
+    for i in range(scene_num):
+        print('------------ Scene operations ------------')
+        Observe(i)
+        Reset(i)
+
+        print('------------ Navigation move ------------')
+        cur_objs = navigation_move(cur_objs, i, map_id)
+        print(cur_objs)
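Note: navigation_move() above walks its fixed waypoint list unconditionally and only prints scene.info afterwards; the commented-out `if scene.info == "Unreachable":` hint in PATCH 1 suggests unreachable goals should eventually be handled. A small sketch of a per-waypoint wrapper that skips the semantic-map capture when the simulator reports the goal as unreachable; the function name and its parameters are illustrative, not part of the repository, and it reuses the sim_client, camera, and GrabSim_pb2 names already defined above:

    def walk_to_and_scan(scene_id, waypoint, yaw, cur_objs, objs_name_set, velocity=200, dis=10):
        # WalkTo values use the same layout as above: [x, y, yaw, velocity, dis].
        values = list(waypoint) + [yaw, velocity, dis]
        action = GrabSim_pb2.Action(scene=scene_id,
                                    action=GrabSim_pb2.Action.ActionType.WalkTo,
                                    values=values)
        scene = sim_client.Do(action)
        if scene.info == "Unreachable":
            # Goal rejected by the simulator: keep the current object list unchanged.
            print("skipping unreachable waypoint:", waypoint)
            return cur_objs, objs_name_set
        # Otherwise capture the head segmentation image and update the object list.
        return camera.get_semantic_map(GrabSim_pb2.CameraName.Head_Segment,
                                       cur_objs, objs_name_set)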