parent d7b5fc9f6c
commit 3859c810d1

@@ -7,6 +7,7 @@ import sys
import time
import grpc


sys.path.append('./')
sys.path.append('../')

@@ -24,6 +25,10 @@ channel = grpc.insecure_channel('localhost:30001', options=[

sim_client = GrabSim_pb2_grpc.GrabSimStub(channel)
objects_dic = {}
obstacle_objs_id = [114, 115, 122, 96, 102, 83, 121, 105, 108, 89, 100, 90,
                    111, 103, 95, 92, 76, 113, 101, 29, 112, 87, 109, 98,
                    106, 120, 97, 86, 104, 78, 85, 81, 82, 84, 91, 93, 94,
                    99, 107, 116, 117, 118, 119, 255]
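# Note (added for clarity): the IDs above are segmentation label values that are
# treated as obstacles when get_obstacle_point() below rasterises the scene into
# the occupancy map.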

'''
Initialize: unload any already-loaded level and clear all robots

@@ -217,6 +222,105 @@ def show_image(img_data, scene):
    plt.imshow(d, cmap="gray" if "depth" in im.name.lower() else None)
    plt.show()


def transform_co(img_data, pixel_x_, pixel_y_, depth_, scene, id=0, label=0):
    im = img_data.images[0]

    # Camera extrinsic matrix
    out_matrix = np.array(im.parameters.matrix).reshape((4, 4))

    d = np.frombuffer(im.data, dtype=im.dtype).reshape((im.height, im.width, im.channels))
    depth = depth_

    # Convert the pixel coordinates to normalized device coordinates
    normalized_x = (pixel_x_ - im.parameters.cx) / im.parameters.fx
    normalized_y = (pixel_y_ - im.parameters.cy) / im.parameters.fy

    # Convert the normalized device coordinates and the depth value to camera coordinates
    camera_x = normalized_x * depth
    camera_y = normalized_y * depth
    camera_z = depth

    # Build the homogeneous camera-coordinate vector
    camera_coordinates = np.array([camera_x, camera_y, camera_z, 1])
    # print("Homogeneous coordinates of the object in the camera frame: ", camera_coordinates)

    # Transform the camera coordinates into robot-base coordinates
    robot_coordinates = np.dot(out_matrix, camera_coordinates)[:3]
    # print("Coordinates of the object relative to the robot base:", robot_coordinates)

    # Convert the object's robot-base coordinates into homogeneous coordinates
    robot_homogeneous_coordinates = np.array([robot_coordinates[0], -robot_coordinates[1], robot_coordinates[2], 1])
    # print("Homogeneous coordinates of the object relative to the robot base:", robot_homogeneous_coordinates)

    # Robot position
    X = scene.location.X
    Y = scene.location.Y
    Z = 0.0

    # Robot rotation
    Roll = 0.0
    Pitch = 0.0
    Yaw = scene.rotation.Yaw

    # Build the translation matrix
    T = np.array([[1, 0, 0, X],
                  [0, 1, 0, Y],
                  [0, 0, 1, Z],
                  [0, 0, 0, 1]])

    # Build the rotation matrices
    Rx = np.array([[1, 0, 0, 0],
                   [0, np.cos(Roll), -np.sin(Roll), 0],
                   [0, np.sin(Roll), np.cos(Roll), 0],
                   [0, 0, 0, 1]])

    Ry = np.array([[np.cos(Pitch), 0, np.sin(Pitch), 0],
                   [0, 1, 0, 0],
                   [-np.sin(Pitch), 0, np.cos(Pitch), 0],
                   [0, 0, 0, 1]])

    Rz = np.array([[np.cos(np.radians(Yaw)), -np.sin(np.radians(Yaw)), 0, 0],
                   [np.sin(np.radians(Yaw)), np.cos(np.radians(Yaw)), 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]])

    R = np.dot(Rz, np.dot(Ry, Rx))
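    # Note (added for clarity): with Roll = Pitch = 0 this product reduces to a pure
    # rotation about the Z axis by Yaw, so T_robot below only encodes the robot's
    # planar pose (X, Y, Yaw) when mapping base-frame points into the world frame.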

    # Build the robot's transformation matrix
    T_robot = np.dot(T, R)
    # print(T_robot)

    # Transform the object's coordinates from the robot-base frame to the world frame
    world_coordinates = np.dot(T_robot, robot_homogeneous_coordinates)[:3]

    # if world_coordinates[0] < 200 and world_coordinates[1] <= 1050:
    #     world_coordinates[0] += 400
    #     world_coordinates[1] += 400
    # elif world_coordinates[0] >= 200 and world_coordinates[1] <= 1050:
    #     world_coordinates[0] -= 550
    #     world_coordinates[1] += 400
    # elif world_coordinates[0] >= 200 and world_coordinates[1] > 1050:
    #     world_coordinates[0] -= 550
    #     world_coordinates[1] -= 1450
    # elif world_coordinates[0] < 200 and world_coordinates[1] > 1050:
    #     world_coordinates[0] += 400
    #     world_coordinates[1] -= 1450
    # print("World coordinates of the object:", world_coordinates)

    # Coordinates after the world offset
    world_offest_coordinates = [world_coordinates[0] + 700, world_coordinates[1] + 1400, world_coordinates[2]]
    # print("World-offset coordinates of the object: ", world_offest_coordinates)
    return world_coordinates

    # Coordinates after the world offset
    # world_offest_coordinates = [world_coordinates[0] + 700, world_coordinates[1] + 1400, world_coordinates[2]]
    # print("World-offset coordinates of the object: ", world_offest_coordinates)

    # dict_f = {'id': id, 'label': label, 'world_coordinates': world_coordinates, 'world_offest_coordinates': world_offest_coordinates}
    # with open('./semantic.txt', 'a') as file:
    #     file.write(str(dict_f) + '\n')

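# Usage sketch (added for illustration; not part of the original file). Assuming a
# Head_Depth frame from get_camera() and the scene returned by the simulator's Observe
# call, a pixel (u, v) with its depth reading can be projected into the world frame
# roughly like this; u, v and depth_value are placeholder names:
#
#     img_data_depth = get_camera([GrabSim_pb2.CameraName.Head_Depth])
#     world_xyz = transform_co(img_data_depth, u, v, depth_value, scene)
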
def save_obj_info(img_data, objs_name):
    items = img_data.info.split(";")

@@ -236,6 +340,51 @@ def save_obj_info(img_data, objs_name):
    return objs_name


def get_obstacle_point(scene, cur_obstacle_world_points, map_ratio):
    cur_obstacle_pixel_points = []
    img_data_segment = get_camera([GrabSim_pb2.CameraName.Head_Segment])
    img_data_depth = get_camera([GrabSim_pb2.CameraName.Head_Depth])
    im_segment = img_data_segment.images[0]
    im_depth = img_data_depth.images[0]
    d_segment = np.frombuffer(im_segment.data, dtype=im_segment.dtype).reshape((im_segment.height, im_segment.width, im_segment.channels))
    d_depth = np.frombuffer(im_depth.data, dtype=im_depth.dtype).reshape((im_depth.height, im_depth.width, im_depth.channels))

    # plt.imshow(d_depth, cmap="gray" if "depth" in im_depth.name.lower() else None)
    # plt.show()
    #
    # plt.imshow(d_segment, cmap="gray" if "depth" in im_segment.name.lower() else None)
    # plt.show()

    d_depth = np.transpose(d_depth, (1, 0, 2))
    d_segment = np.transpose(d_segment, (1, 0, 2))
    for i in range(0, d_segment.shape[0], map_ratio):
        for j in range(0, d_segment.shape[1], map_ratio):
            if d_depth[i][j][0] == 600:
                continue
            # if d_segment[i][j] == 96:
            #     print(f"apple pixel coordinates: ({i},{j})")
            #     print(f"apple depth: {d_depth[i][j][0]}")
            #     print(f"apple world coordinates: {transform_co(img_data_depth, i, j, d_depth[i][j][0], scene)}")
            # if d_segment[i][j] == 113:
            #     print(f"kettle pixel coordinates: ({i},{j})")
            #     print(f"kettle depth: {d_depth[i][j][0]}")
            #     print(f"kettle world coordinates: {transform_co(img_data_depth, i, j, d_depth[i][j][0], scene)}")
            if d_segment[i][j][0] in obstacle_objs_id:
                cur_obstacle_pixel_points.append([i, j])
    # print(cur_obstacle_pixel_points)
    for pixel in cur_obstacle_pixel_points:
        world_point = transform_co(img_data_depth, pixel[0], pixel[1], d_depth[pixel[0]][pixel[1]][0], scene)
        cur_obstacle_world_points.append([world_point[0], world_point[1]])
        # print(f"{pixel}:{[world_point[0], world_point[1]]}")
    return cur_obstacle_world_points
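
# Usage note (added for clarity; not part of the original file): this helper is called from
# Scene.navigation_move() after each WalkTo action, accumulating [world_x, world_y] pairs
# that SceneAEM._run() later rasterises into its map_map occupancy grid. A depth reading
# of 600 appears to be the sensor's "no return" value and is skipped.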


def get_semantic_map(camera, cur_objs, objs_name):
    scene = Observe(0)
    objs = scene.objects

@@ -273,9 +422,9 @@ if __name__ == '__main__':

    # print(get_semantic_map(GrabSim_pb2.CameraName.Head_Segment,cur_objs))

    for camera_name in [GrabSim_pb2.CameraName.Head_Depth]:
        img_data = get_camera([camera_name], i)
        show_image(img_data, scene)
    # for camera_name in [GrabSim_pb2.CameraName.Head_Depth]:
    #     img_data = get_camera([camera_name], i)
    #     show_image(img_data, scene)
    # for camera_name in [GrabSim_pb2.CameraName.Waist_Color, GrabSim_pb2.CameraName.Waist_Depth]:
    #     img_data = get_camera([camera_name], i)
    #     show_image(img_data, 2)

@@ -666,7 +666,7 @@ class Scene:
        scene = stub.Do(action)
        print(scene.info)

    def navigation_move(self, cur_objs, objs_name_set, v_list, scene_id=0, map_id=11):
    def navigation_move(self, cur_objs, objs_name_set, cur_obstacle_world_points, v_list, map_ratio, scene_id=0, map_id=11):
        print('------------------navigation_move----------------------')
        scene = stub.Observe(GrabSim_pb2.SceneID(value=scene_id))
        walk_value = [scene.location.X, scene.location.Y]

@@ -679,6 +679,8 @@ class Scene:
            print("walk_v", walk_v)
            action = GrabSim_pb2.Action(scene=scene_id, action=GrabSim_pb2.Action.ActionType.WalkTo, values=walk_v)
            scene = stub.Do(action)
            cur_obstacle_world_points = camera.get_obstacle_point(scene, cur_obstacle_world_points, map_ratio)

            cur_objs, objs_name_set = camera.get_semantic_map(GrabSim_pb2.CameraName.Head_Segment, cur_objs,
                                                              objs_name_set)
            # if scene.info == "Unreachable":

@@ -696,11 +698,14 @@ class Scene:
            print("walk_v", walk_v)
            action = GrabSim_pb2.Action(scene=scene_id, action=GrabSim_pb2.Action.ActionType.WalkTo, values=walk_v)
            scene = stub.Do(action)

            cur_obstacle_world_points = camera.get_obstacle_point(scene, cur_obstacle_world_points, map_ratio)

            cur_objs, objs_name_set = camera.get_semantic_map(GrabSim_pb2.CameraName.Head_Segment, cur_objs,
                                                              objs_name_set)
            # if scene.info == "Unreachable":
            print(scene.info)
        return cur_objs, objs_name_set
        return cur_objs, objs_name_set, cur_obstacle_world_points

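    # Call-site sketch (added for illustration; not part of the original file): with the
    # extended signature, callers unpack three values and thread the obstacle list and
    # grid resolution through, as SceneAEM._run() does further below:
    #
    #     cur_objs, objs_name_set, cur_obstacle_world_points = self.navigation_move(
    #         cur_objs, objs_name_set, cur_obstacle_world_points,
    #         [[goal[0], goal[1]]], map_ratio, 0, 11)
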
    def isOutMap(self, pos, min_x=-200, max_x=600, min_y=-250, max_y=1300):
        if pos[0] <= min_x or pos[0] >= max_x or pos[1] <= min_y or pos[1] >= max_y:

@@ -741,21 +746,21 @@ class Scene:
            free_array = np.array(free_list)
            print(f"Active exploration finished! The following points in the scene are reachable: {free_array}; all remaining points are obstacles and unreachable")

            # Draw the map: X rows by Y columns, with the first row at the bottom
            plt.clf()
            plt.imshow(self.auto_map, cmap='binary', alpha=0.5, origin='lower',
                       extent=(-250, 1300,
                               -200, 600))
            plt.show()
            print("Finished drawing the map!!!")
            # # Draw the map: X rows by Y columns, with the first row at the bottom
            # plt.clf()
            # plt.imshow(self.auto_map, cmap='binary', alpha=0.5, origin='lower',
            #            extent=(-250, 1300,
            #                    -200, 600))
            # plt.show()
            # print("Finished drawing the map!!!")

            return None
        # Draw the map: X rows by Y columns, with the first row at the bottom
        plt.imshow(self.auto_map, cmap='binary', alpha=0.5, origin='lower',
                   extent=(-250, 1300,
                           -200, 600))
        plt.show()
        print("Drew a partial map!")
        # # Draw the map: X rows by Y columns, with the first row at the bottom
        # plt.imshow(self.auto_map, cmap='binary', alpha=0.5, origin='lower',
        #            extent=(-250, 1300,
        #                    -200, 600))
        # plt.show()
        # print("Drew a partial map!")
        return self.getNearestFrontier(cur_pos, self.all_frontier_list)

    def isNewFrontier(self, pos, map):

@@ -2,8 +2,13 @@
Active exploration and memory of the environment
The task is to output the exploration result (a semantic map) and memorise the key information of the environment: generate a semantic topological map of the environment, plus recognition and position memory for at least ten objects in it, delivered as images, text, or structured data.
"""
import math
import os  # assumed missing in the shown hunk: os.path.exists() is used below
import matplotlib as mpl
import pickle

import numpy as np
from matplotlib import pyplot as plt

from robowaiter.scene.scene import Scene
class SceneAEM(Scene):
    def __init__(self, robot):

@@ -12,8 +17,20 @@ class SceneAEM(Scene):
    def _reset(self):
        pass
    def _run(self):
        # Create a colormap running from white (1) to gray (0)
        cur_objs = []
        cur_obstacle_world_points = []
        objs_name_set = set()
        visited_obstacle = set()

        map_ratio = 5
        # # Create a color map in which 0 means black and 1 means white
        # cmap = plt.cm.get_cmap('gray')
        # cmap.set_under('black')
        # cmap.set_over('white')


        file_name = '../../proto/map_1.pkl'
        if os.path.exists(file_name):
            with open(file_name, 'rb') as file:

@@ -26,15 +43,43 @@ class SceneAEM(Scene):
        # print(reachable([237,490]))
        # navigation_move([[237,490]], i, map_id)
        # navigation_test(i,map_id)
        map_map = np.zeros((math.ceil(950 / map_ratio), math.ceil(1850 / map_ratio)))
        while True:
            goal = self.explore(map, 120)  # cur_pos is the robot's current position; the scene should also expose an interface to obtain it
            if goal is None:
                break
            cur_objs, objs_name_set = self.navigation_move(cur_objs, objs_name_set, [[goal[0], goal[1]]], 0, 11)
            cur_objs, objs_name_set, cur_obstacle_world_points = self.navigation_move(cur_objs, objs_name_set, cur_obstacle_world_points, [[goal[0], goal[1]]], map_ratio, 0, 11)

            for point in cur_obstacle_world_points:
                if point[0] < -350 or point[0] > 600 or point[1] < -400 or point[1] > 1450:
                    continue
                map_map[math.floor((point[0] + 350) / map_ratio), math.floor((point[1] + 400) / map_ratio)] = 1
                visited_obstacle.add((math.floor((point[0] + 350) / map_ratio), math.floor((point[1] + 400) / map_ratio)))
            # plt.imshow(map_map, cmap='binary', alpha=0.5, origin='lower',
            #            extent=(-400 / map_ratio, 1450 / map_ratio,
            #                    -350 / map_ratio, 600 / map_ratio))

            # Draw the grid with imshow; the cmap parameter sets the color mapping
            plt.imshow(map_map, cmap='binary', alpha=0.5, origin='lower',
                       extent=(-400 / map_ratio, 1450 / map_ratio,
                               -350 / map_ratio, 600 / map_ratio))
            # plt.imshow(map_map, cmap='binary', alpha=0.5, origin='lower')
            # plt.axis('off')
            plt.show()
            print("------------ Object information --------------")
            print(cur_objs)

        pass
        # for i in range(-350, 600):
        #     for j in range(-400, 1450):
        #         i = (math.floor((i + 350) / map_ratio))
        #         j = (math.floor((j + 400) / map_ratio))
        #         if (i, j) not in visited_obstacle:
        #             map_map[i][j] = 1
        plt.imshow(map_map, cmap='binary', alpha=0.5, origin='lower',
                   extent=(-400 / map_ratio, 1450 / map_ratio,
                           -350 / map_ratio, 600 / map_ratio))
        # plt.axis('off')
        plt.show()
        print("Finished drawing the map!!!")

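# Grid-index sketch (added for illustration; not part of the original file). The map_map
# array above covers world X in [-350, 600] and Y in [-400, 1450], hence its
# ceil(950 / map_ratio) x ceil(1850 / map_ratio) shape, and a world point is binned as:
#
#     def world_to_grid(x, y, map_ratio=5):
#         # shift the world origin to the map corner, then quantise by the cell size
#         return math.floor((x + 350) / map_ratio), math.floor((y + 400) / map_ratio)
#
#     # e.g. world_to_grid(0.0, 0.0) -> (70, 80) with the default map_ratio of 5
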
if __name__ == '__main__':