Simple multi-turn conversation

This commit is contained in:
ChenXL97 2023-11-15 14:30:57 +08:00
parent f06b87e4fb
commit 254f070c5c
9 changed files with 125 additions and 28 deletions

View File

@@ -6,14 +6,20 @@ from robowaiter.llm_client.ask_llm import ask_llm
 class DealChat(Act):
     def __init__(self):
         super().__init__()
+        self.chat_history = ""

     def _update(self) -> ptree.common.Status:
         # if self.scene.status?
         chat = self.scene.state['chat_list'].pop()
+        self.chat_history += chat + '\n'

-        res_dict = ask_llm(chat)
+        res_dict = ask_llm(self.chat_history)
         answer = res_dict["Answer"]
-        goal = eval(res_dict["Goal"])
+        self.chat_history += answer + '\n'
+
+        goal = res_dict["Goal"]
+        if goal and "{" not in goal:
+            goal = {str(goal)}

         if goal is not None:
             print(f'goal{goal}')
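The DealChat change above is the core of this commit: the node now accumulates a plain-text history, sends the whole history to ask_llm instead of only the latest utterance, and wraps the returned Goal string into a set when it is not already written as one. A minimal sketch of that flow, assuming a stubbed ask_llm (the real call lives in robowaiter/llm_client/ask_llm.py) and illustrative utterances:

```python
def fake_ask_llm(history: str) -> dict:
    # Illustrative canned reply in the {"Answer": ..., "Goal": ...} shape DealChat expects.
    return {"Answer": "好的", "Goal": "On(Coffee,Bar)"}

chat_history = ""
for utterance in ["给我一杯咖啡", "我要拿铁"]:
    chat_history += utterance + '\n'           # append the customer turn
    res_dict = fake_ask_llm(chat_history)      # the full history is sent, not just the last turn
    chat_history += res_dict["Answer"] + '\n'  # append the assistant turn

    goal = res_dict["Goal"]
    if goal and "{" not in goal:               # bare condition string -> wrap it into a set
        goal = {str(goal)}                     # "On(Coffee,Bar)" -> {"On(Coffee,Bar)"}
    if goal is not None:
        print(f'goal{goal}')
```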

View File

@@ -4,6 +4,8 @@ import requests
 import urllib3
 from robowaiter.utils import get_root_path
 from robowaiter.llm_client.single_round import single_round
+from robowaiter.llm_client.tool_api import run_conversation
 ########################################
 # This file implements simple communication with the large language model
 ########################################
@@ -22,7 +24,9 @@ def ask_llm(question):
     if question in test_questions_dict:
         ans = test_questions_dict[question]
     else:
-        ans = single_round(question)
+        ans = run_conversation(question, stream=False)
+        # ans = single_round(question)
     print(f"大模型输出: {ans}")
     return ans
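ask_llm keeps its canned-answer shortcut and otherwise delegates to run_conversation, so whatever it returns must expose the "Answer"/"Goal" keys that DealChat reads. A hedged illustration of that contract; the dictionary entries and the offline_ask_llm helper below are made up, only the key names come from the code above:

```python
# Offline stand-in for ask_llm: canned entries follow the same
# {"Answer": ..., "Goal": ...} shape that run_conversation returns for sub-tasks.
test_questions_dict = {
    "给我一杯咖啡": {"Answer": "好的", "Goal": "On(Coffee,Bar)"},   # embodied request -> goal condition
    "洗手间在哪里": {"Answer": "洗手间在餐厅左侧", "Goal": None},     # plain Q&A -> no goal
}

def offline_ask_llm(question: str) -> dict:
    # lookup-then-fallback, mirroring ask_llm without the live run_conversation call
    return test_questions_dict.get(question, {"Answer": "抱歉,我不明白", "Goal": None})

print(offline_ask_llm("给我一杯咖啡"))   # {'Answer': '好的', 'Goal': 'On(Coffee,Bar)'}
```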

View File

@@ -1,6 +1,6 @@
 import requests
 import urllib3
+from robowaiter.llm_client.tool_api import run_conversation
 ########################################
 # This file implements simple communication with the LLM: multi-turn dialogue; entering "end" ends the conversation
 ########################################
@@ -20,8 +20,8 @@ while k!='end':
     user_dict={"role": "user","content":question_now}
     data_memory.append(user_dict)
     #print(data_memory)
-    response = requests.post(url, headers=headers, json={"messages":data_memory, "repetition_penalty": 1.0}, verify=False)
-    answer=response.json()['choices'][n]['message']['content']
+    response = run_conversation(str(data_memory))
+    answer=str(response)
     print(answer)
     assistant_dict={"role": "assistant","content":answer}
     data_memory.append(assistant_dict)
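In the interactive multi-turn script, the raw requests.post call is replaced by run_conversation; note that the accumulated message list is flattened with str() into a single user prompt rather than passed as structured chat history. A sketch of the surrounding loop under that assumption (the system prompt and the input handling are illustrative):

```python
from robowaiter.llm_client.tool_api import run_conversation

data_memory = [{"role": "system", "content": "你是咖啡厅的服务员机器人"}]  # illustrative prompt
k = ''
while k != 'end':
    k = input("user ('end' to quit): ")
    if k == 'end':
        break
    data_memory.append({"role": "user", "content": k})
    response = run_conversation(str(data_memory))   # whole history serialized into one string
    answer = str(response)                          # response may be a dict or plain text
    print(answer)
    data_memory.append({"role": "assistant", "content": answer})
```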

View File

@@ -3,8 +3,8 @@ import json
 import openai
 from colorama import init, Fore
 from loguru import logger
-from tool_register import get_tools, dispatch_tool
-import json
+from robowaiter.llm_client.tool_register import get_tools, dispatch_tool
 import requests
 import json
@@ -29,22 +29,21 @@ def get_response(**kwargs):
 functions = get_tools()

-def run_conversation(query: str, stream=False, functions=None, max_retry=5):
+def run_conversation(query: str, stream=False, max_retry=5):
     params = dict(model="chatglm3", messages=[{"role": "user", "content": query}], stream=stream)
-    if functions:
-        params["functions"] = functions
+    params["functions"] = functions
     response = get_response(**params)
     for _ in range(max_retry):
         if response["choices"][0]["message"].get("function_call"):
             function_call = response["choices"][0]["message"]["function_call"]
-            logger.info(f"Function Call Response: {function_call}")
             if "sub_task" in function_call["name"]:
                 return {
                     "Answer": "好的",
-                    "Goal": function_call["arguments"]
+                    "Goal": json.loads(function_call["arguments"])["goal"]
                 }
+            logger.info(f"Function Call Response: {function_call}")
             function_args = json.loads(function_call["arguments"])
             tool_response = dispatch_tool(function_call["name"], function_args)
             logger.info(f"Tool Call Response: {tool_response}")
@@ -71,5 +70,5 @@ def run_conversation(query: str, stream=False, functions=None, max_retry=5):
 if __name__ == "__main__":
-    query = "关掉空调"
-    print(run_conversation(query, functions=functions, stream=False))
+    query = "可以带我去吗"
+    print(run_conversation(query, stream=False))
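The important change in run_conversation is the Goal field: instead of returning the model's raw arguments JSON string, the commit parses it and returns only its "goal" entry, which DealChat then normalizes into a set. A self-contained illustration of that parsing step; the arguments payload below is a plausible example, not captured model output:

```python
import json

# What a ChatGLM3 function_call for create_sub_task might look like (illustrative payload).
function_call = {
    "name": "create_sub_task",
    "arguments": '{"goal": "{Is(AC,Off)}"}',
}

if "sub_task" in function_call["name"]:
    result = {
        "Answer": "好的",
        "Goal": json.loads(function_call["arguments"])["goal"],  # before this commit: the raw JSON string
    }
    print(result)   # {'Answer': '好的', 'Goal': '{Is(AC,Off)}'}
```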

View File

@@ -40,7 +40,7 @@ def register_tool(func: callable):
         "params": tool_params
     }

-    print("[registered tool] " + pformat(tool_def))
+    # print("[registered tool] " + pformat(tool_def))

     _TOOL_HOOKS[tool_name] = func
     _TOOL_DESCRIPTIONS[tool_name] = tool_def
@@ -126,13 +126,14 @@ def get_tools() -> dict:
 @register_tool
 def create_sub_task(
-    goal: Annotated[str, 'The set of goal conditions the sub-task must achieve', True]
+    goal: Annotated[str, 'The set of goal conditions the sub-task must achieve, e.g. {On(Coffee,Bar)}, {At(Robot,Table1)}, {Is(AC,Off)}', True]
 ) -> str:
     """
     Call this function when an embodied task (such as making coffee, fetching or placing an object, sweeping the floor, or going to some location) needs to be performed. Interpret the user's intent from their prompt and generate the goal-state set `goal` of the sub-task, which expresses the user's intent in first-order logic.
-    Make a cup of coffee: `goal`={On(Coffee,Bar)},
-    Go to table one: `goal`={At(Robot,Table1)},
-    Turn on the air conditioner: `goal`={Is(AC,On)},
+    Make a cup of coffee: the argument of this function is "On(Coffee,Bar)",
+    Go to table one: the argument of this function is "At(Robot,Table1)",
+    Turn on the air conditioner: the argument of this function is "Is(AC,On)",
+    Turn off the air conditioner: the argument of this function is "Is(AC,Off)",
     """
     return goal
@@ -142,7 +143,7 @@ def get_object_info(
     obj: Annotated[str, 'Name of the object whose information is needed', True]
 ) -> str:
     """
-    Get information about the specified object `object` in the scene
+    Get information about the specified object `object` in the scene; if `object` is a place (for example the restroom), output the corresponding place information; if `object` is, for example, a coffee, output the corresponding object information
     """
     near_object = None
     if obj == "Table":
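The richer Annotated description and docstring examples matter because register_tool turns them into the tool schema that get_tools() hands to the model. A hedged mini version of how that metadata can be read back out of Annotated; the real extraction lives in robowaiter/llm_client/tool_register.py, and describe_params below is illustrative, not the repository's helper:

```python
from typing import Annotated, get_type_hints

def describe_params(func) -> dict:
    """Illustrative helper: pull (description, required) pairs out of Annotated metadata."""
    params = {}
    for name, hint in get_type_hints(func, include_extras=True).items():
        if name == "return" or not hasattr(hint, "__metadata__"):
            continue
        description, required = hint.__metadata__
        params[name] = {"description": description, "required": required}
    return params

def create_sub_task(
    goal: Annotated[str, 'The set of goal conditions the sub-task must achieve', True]
) -> str:
    return goal

print(describe_params(create_sub_task))
# {'goal': {'description': 'The set of goal conditions the sub-task must achieve', 'required': True}}
```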

View File

@@ -332,6 +332,23 @@ class Scene:
            )
        )

+    # def walker_bubble(self, message):
+    #     status = self.status
+    #     walker_name = status.walkers[0].name
+    #     talk_content = walker_name + ":" + message
+    #     self.control_robot_action(0, 0, 3, talk_content)
+
+    # def control_robot_action(self, scene_id=0, type=0, action=0, message="你好"):
+    #     print('------------------control_robot_action----------------------')
+    #     scene = stub.ControlRobot(
+    #         GrabSim_pb2.ControlInfo(scene=scene_id, type=type, action=action, content=message))
+    #     if (str(scene.info).find("Action Success") > -1):
+    #         print(scene.info)
+    #         return True
+    #     else:
+    #         print(scene.info)
+    #         return False
+
     def animation_control(self, animation_type):
         # animation_type: 1: make coffee, 2: pour water, 3: grab food, 4: mop floor, 5: clean table
         scene = stub.ControlRobot(

View File

@@ -16,15 +16,30 @@ class SceneGQA(Scene):
         super().__init__(robot)
         # Add the events that happen in the scene here: (time the event occurs, event function)
         self.event_list = [
-            (5, self.create_chat_event("给我一杯咖啡")),
-            (20, self.create_chat_event("我要拿铁")),
-            (40, self.create_chat_event("再来一杯")),
+            (5, self.create_chat_event("洗手间在哪里")),
+            (12, self.create_chat_event("可以带我去吗")),
         ]

     def _reset(self):
-        self.add_walker(1085, 2630, 220)
-        self.control_walker([self.walker_control_generator(0, False, 100, 755, 1900, 180)])
-        self.clean_walker()
+        self.add_walker(50, 500, 0)
+        self.walker_bubble("洗手间在哪里")
+        # self.control_walker([self.walker_control_generator(0, False, 100, 755, 1900, 180)])

     def _run(self):
         pass

+if __name__ == '__main__':
+    import os
+    from robowaiter.robot.robot import Robot
+
+    robot = Robot()
+
+    # create task
+    task = SceneGQA(robot)
+    task.reset()
+    task.run()
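The new event_list entries only make sense together with DealChat: each scheduled event presumably pushes its sentence onto scene.state['chat_list'], which DealChat._update pops on the next tick. The body of create_chat_event is not part of this diff, so the sketch below is an assumption about its behaviour, demonstrated with a fake scene object rather than the real Scene class:

```python
class FakeScene:
    def __init__(self):
        self.state = {'chat_list': []}

    def create_chat_event(self, sentence):
        # assumption: the event simply queues the utterance for DealChat to pop later
        return lambda: self.state['chat_list'].append(sentence)

scene = FakeScene()
event = scene.create_chat_event("洗手间在哪里")   # what (5, ...) in event_list schedules
event()                                           # fired 5 seconds into the scene
print(scene.state['chat_list'].pop())             # the string DealChat feeds into ask_llm
```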

View File

@@ -18,11 +18,12 @@ class SceneOT(Scene):
         super().__init__(robot)
         # Add the events that happen in the scene here
         self.event_list = [
-            (5, self.create_chat_event("给我一杯咖啡"))  # (time the event occurs, event function)
+            (5, self.create_chat_event("来二号桌")),
+            # (5, self.create_chat_event("感觉有点冷,可以关一下空调吗")),
         ]

     def _reset(self):
-        pass
+        self.add_walker(50, 300, 0)
         # self.add_walker(1085, 2630, 220)
         # self.control_walker([self.walker_control_generator(0, False, 100, 755, 1900, 180)])
@@ -31,3 +32,13 @@ class SceneOT(Scene):
         pass

+if __name__ == '__main__':
+    import os
+    from robowaiter.robot.robot import Robot
+
+    robot = Robot()
+
+    # create task
+    task = SceneOT(robot)
+    task.reset()
+    task.run()

View File

@@ -0,0 +1,44 @@
+"""
+A person makes a request and the robot completes the task:
+1. Make coffee (fixed animation): after receiving the "make coffee" instruction, walk to the coffee machine, take a cup, operate the coffee machine, pick up the cup, and deliver it to the customer's table.
+2. Pour water
+3. Pick up snacks
+Concrete description: design a set of ordering rules (e.g. a menu containing coffee, snacks, and so on) and fabricate random orders according to these rules; after receiving an order, use the large language model to make the robot output a reasonable meal-preparation plan, and try to carry out this plan in the simulation environment.
+"""
+# todo: receive the order information and let the LLM generate the task plan
+
+from robowaiter.scene.scene import Scene
+
+class SceneOT(Scene):
+    def __init__(self, robot):
+        super().__init__(robot)
+        # Add the events that happen in the scene here
+        self.event_list = [
+            # (5, self.create_chat_event("做一杯咖啡")),
+            (5, self.create_chat_event("感觉有点冷,可以关一下空调吗")),
+        ]
+
+    def _reset(self):
+        self.add_walker(50, 300, 0)
+        # self.add_walker(1085, 2630, 220)
+        # self.control_walker([self.walker_control_generator(0, False, 100, 755, 1900, 180)])
+
+    def _run(self):
+        pass
+
+if __name__ == '__main__':
+    import os
+    from robowaiter.robot.robot import Robot
+
+    robot = Robot()
+
+    # create task
+    task = SceneOT(robot)
+    task.reset()
+    task.run()
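The module docstring of the new scene file leaves order generation as a todo: fabricate random orders from a small menu, then let the LLM plan the preparation. A minimal sketch of that idea; the menu contents, the make_random_order helper, and the order format are all made up for illustration:

```python
import random

MENU = {"咖啡": ["拿铁", "美式", "卡布奇诺"], "点心": ["蛋糕", "曲奇"]}  # illustrative menu

def make_random_order(max_items: int = 3) -> list:
    """Pick between 1 and max_items random items from the menu."""
    items = [item for choices in MENU.values() for item in choices]
    return random.sample(items, k=random.randint(1, min(max_items, len(items))))

order = make_random_order()
print("订单:", order)   # e.g. ['拿铁', '曲奇'] -- a string like this could then be fed to run_conversation
```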