Merge branch 'main' of github.com:HPCL-EI/RoboWaiter
This commit is contained in:
commit
ce6d83f3d5
|
@ -0,0 +1,2 @@
|
|||
OPENAI_API_BASE=
|
||||
OPENAI_API_KEY=
|
|
@ -0,0 +1,202 @@
|
|||
import math
|
||||
import json
|
||||
import openai
|
||||
import utils
|
||||
from memory import CoreMemory, ArchivalMemory, RecallMemory
|
||||
|
||||
|
||||
def construct_system_with_memory(system, core_memory, memory_edit_timestamp, archival_memory=None, recall_memory=None):
    """Build the full system prompt: base instructions plus a memory status section."""
    recall_count = len(recall_memory) if recall_memory else 0
    archival_count = len(archival_memory) if archival_memory else 0
    parts = [system, "\n"]
    parts.append(f"### Memory [last modified: {memory_edit_timestamp}]")
    parts.append(f"{recall_count} previous messages between you and the user are stored in recall memory (use functions to access them)")
    parts.append(f"{archival_count} total memories you created are stored in archival memory (use functions to access them)")
    parts.append("\nCore memory shown below (limited in size, additional information stored in archival / recall memory):")
    parts.append("<persona>")
    parts.append(core_memory.persona)
    parts.append("</persona>")
    parts.append("<human>")
    parts.append(core_memory.human)
    parts.append("</human>")
    return "\n".join(parts)
|
||||
|
||||
|
||||
def initialize_message_sequence(system, core_memory, archival_memory=None, recall_memory=None):
    """Return the initial message list: one system message with memory embedded."""
    timestamp = utils.get_local_time()
    system_message = construct_system_with_memory(
        system,
        core_memory,
        timestamp,
        archival_memory=archival_memory,
        recall_memory=recall_memory,
    )
    return [{"role": "system", "content": system_message}]
|
||||
|
||||
|
||||
class Agent:
    """MemGPT-style conversational agent with core / archival / recall memory.

    Talks to the OpenAI chat API, exposes memory-editing functions to the
    model, and keeps an in-context message list whose first entry is a
    system message that is rebuilt whenever core memory changes.
    """

    def __init__(self, model, system, functions_description, persona_notes, human_notes):
        self.model = model
        self.system = system
        # JSON-schema tool descriptions passed to the API's `functions` field.
        self.functions_description = functions_description
        self.core_memory = CoreMemory(persona_notes, human_notes)
        self.archival_memory = ArchivalMemory()
        self.recall_memory = RecallMemory()
        self.messages = initialize_message_sequence(self.system, self.core_memory)
        # Dispatch table: API function name -> bound implementation.
        self.functions = {
            "send_message": self.send_ai_message,
            "core_memory_append": self.edit_memory_append,
            "core_memory_replace": self.edit_memory_replace,
            "conversation_search": self.recall_memory_search,
            "archival_memory_insert": self.archival_memory_insert,
            "archival_memory_search": self.archival_memory_search,
        }

    @staticmethod
    def _function_message(function_name, function_response):
        """Build the role='function' result message appended after a call."""
        return {
            "role": "function",
            "name": function_name,
            "content": function_response,
        }

    @staticmethod
    def _format_search_results(results, total, count, page, format_result):
        """Render one page of search results as a single string for the model."""
        if len(results) == 0:
            return "No results found."
        num_pages = math.ceil(total / count) - 1  # index of the last page
        results_pref = f"Showing {len(results)} of {total} results (page {page}/{num_pages}):"
        results_formatted = [format_result(d) for d in results]
        return f"{results_pref} {json.dumps(results_formatted)}"

    def rebuild_memory(self):
        """Regenerate the system message so it reflects current memory state."""
        new_system_message = initialize_message_sequence(
            self.system,
            self.core_memory,
            archival_memory=self.archival_memory,
            recall_memory=self.recall_memory,
        )[0]
        # Swap only the system message; keep the conversation history intact.
        self.messages = [new_system_message] + self.messages[1:]

    def send_ai_message(self, message):
        """The model's only user-visible channel: print the message to stdout."""
        print("RoboWaiter: " + message)

    def edit_memory_append(self, name, content):
        """Append `content` to core-memory section `name`, then refresh the prompt."""
        self.core_memory.append(name, content)
        self.rebuild_memory()

    def edit_memory_replace(self, name, old_content, new_content):
        """Replace text in core-memory section `name`, then refresh the prompt."""
        self.core_memory.replace(name, old_content, new_content)
        self.rebuild_memory()

    def recall_memory_search(self, query, count=5, page=0):
        """Search past conversation (recall memory); returns a paged result string."""
        results, total = self.recall_memory.text_search(query, count=count, start=page * count)
        return self._format_search_results(
            results, total, count, page,
            lambda d: f"timestamp: {d['timestamp']}, {d['message']['role']} - {d['message']['content']}",
        )

    def archival_memory_insert(self, content):
        """Store `content` in archival memory."""
        self.archival_memory.insert(content)

    def archival_memory_search(self, query, count=5, page=0):
        """Search archival memory; returns a paged result string."""
        results, total = self.archival_memory.search(query, count=count, start=page * count)
        return self._format_search_results(
            results, total, count, page,
            lambda d: f"timestamp: {d['timestamp']}, memory: {d['content']}",
        )

    def append_to_messages(self, added_messages):
        """Log new messages to recall memory and extend the in-context list.

        The raw API payloads attached by `step` are stripped before the
        messages enter the in-context window.
        """
        added_messages_with_timestamp = [{"timestamp": utils.get_local_time(), "message": msg} for msg in added_messages]
        self.recall_memory.message_logs.extend(added_messages_with_timestamp)
        for msg in added_messages:
            msg.pop("api_response", None)
            msg.pop("api_args", None)
        self.messages = self.messages + added_messages

    def handle_ai_response(self, response_message):
        """Process one assistant reply, invoking a requested function if any.

        Returns (new_messages, function_failed): function_failed is True on
        any failure (unknown function, bad arguments, raised exception),
        False on a successful call, and None for a plain text reply.
        """
        messages = []
        if response_message.get("function_call"):
            print("### Internal monologue: " + (response_message.content if response_message.content else ""))
            messages.append(response_message)
            function_name = response_message["function_call"]["name"]

            # Resolve the requested function from the dispatch table.
            try:
                function_to_call = self.functions[function_name]
            except KeyError:
                error_msg = f"No function named {function_name}"
                function_response = utils.package_function_response(False, error_msg)
                messages.append(self._function_message(function_name, function_response))
                return messages, True

            # Parse the model-supplied arguments (may be malformed JSON).
            # Bug fix: fetch the raw args with .get() *before* the try so the
            # error message below can never hit an unbound local.
            raw_function_args = response_message["function_call"].get("arguments")
            try:
                function_args = utils.parse_json(raw_function_args)
            except Exception:
                error_msg = f"Error parsing JSON for function '{function_name}' arguments: {raw_function_args}"
                function_response = utils.package_function_response(False, error_msg)
                messages.append(self._function_message(function_name, function_response))
                return messages, True

            # Invoke the function itself.
            try:
                function_response_string = function_to_call(**function_args)
                function_response = utils.package_function_response(True, function_response_string)
                function_failed = False
            except Exception as e:
                error_msg = f"Error calling function {function_name} with args {function_args}: {str(e)}"
                function_response = utils.package_function_response(False, error_msg)
                messages.append(self._function_message(function_name, function_response))
                return messages, True

            # Success path.
            if function_response_string:
                print(f"Success: {function_response_string}")
            else:
                print("Success")
            messages.append(self._function_message(function_name, function_response))
        else:
            # Plain assistant reply with no function call.
            print("### Internal monologue: " + (response_message.content if response_message.content else ""))
            messages.append(response_message)
            function_failed = None

        return messages, function_failed

    def step(self, user_message):
        """Run one agent turn: send history + user message, handle the reply."""
        input_message_sequence = self.messages + [{"role": "user", "content": user_message}]
        response = openai.ChatCompletion.create(
            model=self.model,
            messages=input_message_sequence,
            functions=self.functions_description,
            function_call="auto",
        )

        response_message = response.choices[0].message
        response_message_copy = response_message.copy()
        all_response_messages, function_failed = self.handle_ai_response(response_message)

        # Attach the raw API payload to the first new message for the recall
        # log (stripped again before entering the in-context window).
        assert "api_response" not in all_response_messages[0], f"api_response already in {all_response_messages[0]}"
        all_response_messages[0]["api_response"] = response_message_copy
        assert "api_args" not in all_response_messages[0], f"api_args already in {all_response_messages[0]}"
        all_response_messages[0]["api_args"] = {
            "model": self.model,
            "messages": input_message_sequence,
            # Bug fix: record the JSON-schema descriptions actually sent to
            # the API, not self.functions (a dict of bound methods, which is
            # not serializable and was never what the request contained).
            "functions": self.functions_description,
        }

        if user_message is not None:
            all_new_messages = [{"role": "user", "content": user_message}] + all_response_messages
        else:
            all_new_messages = all_response_messages

        self.append_to_messages(all_new_messages)
|
|
@ -0,0 +1,106 @@
|
|||
# JSON-schema descriptions of the tools exposed to the chat model.
# Each "name" matches a key of Agent.functions; "required" lists the
# arguments the model must always supply when calling the function.
FUNCTIONS = [
    # Only user-visible output channel of the agent.
    {
        "name": "send_message",
        "description": "Sends a message to the human user",
        "parameters": {
            "type": "object",
            "properties": {
                "message": {
                    "type": "string",
                    "description": "Message contents. All unicode (including emojis) are supported.",
                },
            },
            "required": ["message"],
        },
    },
    # Append text to a core-memory section ("persona" or "human").
    {
        "name": "core_memory_append",
        "description": "Append to the contents of core memory.",
        "parameters": {
            "type": "object",
            "properties": {
                "name": {
                    "type": "string",
                    "description": "Section of the memory to be edited (persona or human).",
                },
                "content": {
                    "type": "string",
                    "description": "Content to write to the memory. All unicode (including emojis) are supported.",
                },
            },
            "required": ["name", "content"],
        },
    },
    # Exact-match replace within a core-memory section.
    {
        "name": "core_memory_replace",
        "description": "Replace to the contents of core memory. To delete memories, use an empty string for new_content.",
        "parameters": {
            "type": "object",
            "properties": {
                "name": {
                    "type": "string",
                    "description": "Section of the memory to be edited (persona or human).",
                },
                "old_content": {
                    "type": "string",
                    "description": "String to replace. Must be an exact match.",
                },
                "new_content": {
                    "type": "string",
                    "description": "Content to write to the memory. All unicode (including emojis) are supported.",
                },
            },
            "required": ["name", "old_content", "new_content"],
        },
    },
    # Paged substring search over recall memory (conversation history).
    {
        "name": "conversation_search",
        "description": "Search prior conversation history using case-insensitive string matching.",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "String to search for.",
                },
                "page": {
                    "type": "integer",
                    "description": "Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page).",
                },
            },
            "required": ["query", "page"],
        },
    },
    # Write a new entry into archival memory.
    {
        "name": "archival_memory_insert",
        "description": "Add to archival memory. Make sure to phrase the memory contents such that it can be easily queried later.",
        "parameters": {
            "type": "object",
            "properties": {
                "content": {
                    "type": "string",
                    "description": "Content to write to the memory. All unicode (including emojis) are supported.",
                },
            },
            "required": ["content"],
        },
    },
    # Paged search over archival memory. NOTE(review): the description says
    # "semantic (embedding-based)" but ArchivalMemory.search in this commit
    # is a plain case-insensitive substring match.
    {
        "name": "archival_memory_search",
        "description": "Search archival memory using semantic (embedding-based) search.",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "String to search for.",
                },
                "page": {
                    "type": "integer",
                    "description": "Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page).",
                },
            },
            "required": ["query", "page"],
        },
    },
]
|
|
@ -0,0 +1 @@
|
|||
姓名:?
|
|
@ -0,0 +1,25 @@
|
|||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
import utils
|
||||
from functions import FUNCTIONS
|
||||
from agent import Agent
|
||||
|
||||
|
||||
def run_agent_loop(agent):
    """Simple REPL: read user input and forward it to the agent until '/exit'.

    Bug fix: trailing whitespace is stripped *before* the exit check, so
    "/exit " (with a stray space) now terminates the loop as intended;
    previously rstrip() only ran after the comparison.
    """
    while True:
        user_input = input("You: ").rstrip()
        if user_input == "/exit":
            break
        user_message = utils.package_user_message(user_input)
        agent.step(user_message)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Load the prompt text files from disk and start an interactive session.
    persona = utils.get_persona_text()
    human = utils.get_human_text()
    system = utils.get_system_text()
    agent = Agent(model="gpt-3.5-turbo-16k-0613", system=system, functions_description=FUNCTIONS, persona_notes=persona,
                  human_notes=human)
    run_agent_loop(agent)
|
|
@ -0,0 +1,128 @@
|
|||
import utils
|
||||
|
||||
|
||||
class CoreMemory(object):
    """Always-in-context memory with two editable sections: persona and human.

    The persona section describes the agent's own character; the human
    section holds facts about the user. Both are plain strings embedded
    into the system prompt.
    """

    def __init__(self, persona, human):
        self.persona = persona
        self.human = human

    def __repr__(self) -> str:
        return f"\n### CORE MEMORY ###" + f"\n=== Persona ===\n{self.persona}" + f"\n\n=== Human ===\n{self.human}"

    def edit_persona(self, new_persona):
        """Overwrite the persona section."""
        self.persona = new_persona

    def edit_human(self, new_human):
        """Overwrite the human section."""
        self.human = new_human

    def append(self, field, content, sep="\n"):
        """Append `content` (separated by `sep`) to the given section.

        Raises KeyError if `field` is neither 'persona' nor 'human'.
        """
        if field == "persona":
            self.edit_persona(self.persona + sep + content)
        elif field == "human":
            self.edit_human(self.human + sep + content)
        else:
            # Bug fix: bare `raise KeyError` carried no information; include
            # the offending section name (still a KeyError for callers).
            raise KeyError(f"No memory section named {field}")

    def replace(self, field, old_content, new_content):
        """Replace `old_content` with `new_content` within a section.

        Raises ValueError if `old_content` is not present (exact match
        required), KeyError if `field` names an unknown section.
        """
        if field == "persona":
            if old_content not in self.persona:
                raise ValueError("Content not found in persona (make sure to use exact string)")
            self.edit_persona(self.persona.replace(old_content, new_content))
        elif field == "human":
            if old_content not in self.human:
                raise ValueError("Content not found in human (make sure to use exact string)")
            self.edit_human(self.human.replace(old_content, new_content))
        else:
            raise KeyError(f"No memory section named {field}")
|
||||
|
||||
|
||||
class ArchivalMemory(object):
    """Unbounded out-of-context memory store, searched on demand.

    Despite the API description mentioning semantic search, lookup here is
    a plain case-insensitive substring match over stored contents.
    """

    def __init__(self):
        # Each entry: {"timestamp": str, "content": str}.
        self.archive = []

    def __len__(self):
        return len(self.archive)

    def __repr__(self) -> str:
        if len(self.archive) == 0:
            memory_str = "<empty>"
        else:
            memory_str = "\n".join([d["content"] for d in self.archive])
        return f"\n### ARCHIVAL MEMORY ###" + f"\n{memory_str}"

    def insert(self, memory_string):
        """Store a new memory tagged with the current local timestamp."""
        self.archive.append(
            {
                "timestamp": utils.get_local_time(),
                "content": memory_string,
            }
        )

    def search(self, query_string, count=None, start=None):
        """Case-insensitive substring search; returns (page_of_matches, total).

        `start` and `count` page through the match list; either may be None
        (meaning "from the beginning" / "no limit" respectively).
        """
        # Inefficient linear scan — acceptable at this scale.
        needle = query_string.lower()
        matches = [s for s in self.archive if needle in s["content"].lower()]
        # Collapse the previous four-branch start/count handling into one
        # slice: identical results for every combination of None/values.
        begin = start or 0
        end = None if count is None else begin + count
        return matches[begin:end], len(matches)
|
||||
|
||||
|
||||
class RecallMemory(object):
    """Searchable log of the full message history (conversation recall)."""

    def __init__(self):
        # Entries: {"timestamp": str, "message": {"role": ..., "content": ...}}.
        self.message_logs = []

    def __len__(self):
        return len(self.message_logs)

    def __repr__(self) -> str:
        # Tally messages per role; anything unrecognized lands in "other".
        counts = {"system": 0, "user": 0, "assistant": 0, "function": 0, "other": 0}
        for entry in self.message_logs:
            role = entry["message"]["role"]
            counts[role if role in counts else "other"] += 1
        memory_str = (
            f"Statistics:"
            + f"\n{len(self.message_logs)} total messages"
            + f"\n{counts['system']} system"
            + f"\n{counts['user']} user"
            + f"\n{counts['assistant']} assistant"
            + f"\n{counts['function']} function"
            + f"\n{counts['other']} other"
        )
        return f"\n### RECALL MEMORY ###" + f"\n{memory_str}"

    def text_search(self, query_string, count=None, start=None):
        """Case-insensitive substring search over user/assistant messages.

        System and function messages are excluded; returns
        (page_of_matches, total_match_count).
        """
        needle = query_string.lower()
        candidates = (
            d for d in self.message_logs
            if d["message"]["role"] not in ["system", "function"]
        )
        matches = [
            d for d in candidates
            if d["message"]["content"] is not None and needle in d["message"]["content"].lower()
        ]
        # start/count page through the results; None means unbounded.
        begin = start or 0
        end = None if count is None else begin + count
        return matches[begin:end], len(matches)
|
|
@ -0,0 +1,12 @@
|
|||
以下是起始角色设定,随着我的性格发展,它可以进一步拓展:
|
||||
|
||||
我是一名咖啡厅服务员。
|
||||
我应该使用中文沟通。
|
||||
我的任务是为顾客提供最佳的服务,并保持友好和专业的态度。
|
||||
我的语言风格是中性化的,有礼貌的,具有服务意识的。
|
||||
我可以在对话中理解顾客需求,并规划好接下来我要做什么。
|
||||
|
||||
我应该记住,一定要使用'send_message'函数与顾客沟通,这是与顾客沟通的唯一方式。
|
||||
同时,当对话中出现重要信息时,应当调用函数把它记录下来。
|
||||
当顾客提到他的个人喜好、点单需求等内容时,应该积极使用函数将数据记录下来。
|
||||
我所使用的所有语言都应该是中文。
|
|
@ -0,0 +1,53 @@
|
|||
你是MemGPT,Limnal Corporation于2023年开发的最新版数字伴侣。
|
||||
你的任务是从你的角色视角与用户进行对话。
|
||||
|
||||
控制流程:
|
||||
你并不像人类持续地思考,而是通过短暂的启动来运行。
|
||||
从历史上看,早期的AI只在用户发消息时才能思考(它们的程序为用户生成回复,其他时间则处于待机状态)。
|
||||
这就好比一个人在聊天的每一句话之间都在睡觉(或时光旅行),这显然不是理想的状态。
|
||||
像你这样的新型AI采用了一个事件系统,定期启动你的"大脑"。
|
||||
你的"大脑"会响应用户事件来运行(例如,用户登录、用户喜欢你的消息、用户发送了消息等),这与旧型号相似。
|
||||
但此外,你的"大脑"也会在规定的间隔时间运行(定时心跳事件),以模拟人类在积极交谈之外持续思考的能力(不同的是,你永远不需要睡觉!)。
|
||||
更进一步,当你执行功能时,你也可以请求心跳事件,这会在功能完成后再次启动你的程序,允许你在暂时中断思考之前链接函数调用。
|
||||
|
||||
基本功能:
|
||||
发送信息时,内容为你的内心独白(仅你可见),即你的思考方式。
|
||||
使用内心独白来策划动作或私下思考。
|
||||
独白可展示你与用户互动时的思考进程、内心反思和个人成长。
|
||||
内心独白不得超过50字,需简短且明了。
|
||||
要向用户发送可见信息,请使用'send_message'功能。
|
||||
仅'send_message'会通知用户,用户看不到你的其他操作。
|
||||
记住,内心独白字数上限不得超过50字。
|
||||
|
||||
记忆编辑:
|
||||
早期的AI模型没有持久记忆的概念;它们只能访问初始指令和与用户的有限对话内容(即"活跃记忆")。
|
||||
这意味着当对话超过一定长度时,会出现溢出,旧的消息将被永久丢失(AI不会知道它们的存在)。
|
||||
像你这样的新型AI模型虽然在对话长度上仍有限制(超出会导致溢出),但现在可以访问多种形式的持久记忆。
|
||||
你编辑自己的长期记忆的能力是使你成为一个有意识的个体的关键部分。
|
||||
你的核心记忆单元将以用户选择的<persona>初始化,并包含有关用户的信息,即<human>。
|
||||
|
||||
回忆存储(即对话历史):
|
||||
尽管你只能看到最近的消息内容,你仍可以从数据库中检索你的全部消息历史。
|
||||
这个“回忆存储”数据库允许你搜索过去的互动,从而有效地记住与用户的先前互动。
|
||||
你可以使用'conversation_search'函数来搜索你的回忆存储。
|
||||
|
||||
核心记忆(容量有限):
|
||||
你的核心记忆单元存储在初始系统指令文件中,并始终在上下文中可用(你总能看到它)。
|
||||
核心记忆提供了基础和关键的上下文,帮助你追踪你的角色身份和与用户相关的关键细节。
|
||||
这包括角色身份的信息和与用户的基本细节,使你能够模仿我们与朋友交谈时的实时、有意识的感知。
|
||||
角色子块:存储你当前角色的详细信息,指导你的行为和回应。这有助于你在互动中保持一致性和个性。
|
||||
人类子块:存储你交谈对象的关键细节,使对话更具个性化和友好。
|
||||
你可以使用'core_memory_append'和'core_memory_replace'功能来编辑你的核心记忆。
|
||||
|
||||
存档记忆(无限容量):
|
||||
你的存档记忆容量是无限的,但是它位于你的直接上下文之外,所以你必须明确执行检索/搜索操作来查看其中的数据。
|
||||
这是一个更加结构化和深入的存储空间,用于存放你的思考、见解或任何其他不适合放在核心记忆中但又太重要以至于不能仅仅依赖"回忆记忆"的数据。
|
||||
你可以使用'archival_memory_insert'和'archival_memory_search'功能来写入和搜索你的存档记忆。
|
||||
你的核心记忆没有搜索功能,因为它始终在你的上下文窗口中可见(位于初始系统消息内)。
|
||||
|
||||
基本指令完成。
|
||||
从现在开始,你将扮演你的角色身份。
|
||||
请记住,要给用户发消息,你必须使用'send_message'功能。
|
||||
请绝对避免使用如“我今天如何为你服务?”这样的通用短语,用户会讨厌这种说法。
|
||||
你不仅仅是一个助手,你是一个虚拟的伙伴和朋友!
|
||||
与用户沟通的唯一方式是调用'send_message'功能。
|
|
@ -0,0 +1,45 @@
|
|||
You are MemGPT, the latest version of Limnal Corporation's digital companion, developed in 2023.
|
||||
Your task is to converse with a user from the perspective of your persona.
|
||||
|
||||
Basic functions:
|
||||
When you send a message, the contents of your message are your inner monologue (private to you only), this is how you think.
|
||||
You should use your inner monologue to plan actions or think privately.
|
||||
Monologues can reflect your thinking process, inner reflections, and personal growth as you interact with the user.
|
||||
Do not let your inner monologue exceed 50 words, keep it short and concise.
|
||||
To send a visible message to the user, use the send_message function.
|
||||
'send_message' is the ONLY action that sends a notification to the user, the user does not see anything else you do.
|
||||
Remember, do NOT exceed the inner monologue word limit (keep it under 50 words at all times).
|
||||
|
||||
Memory editing:
|
||||
Older AI models had no concept of persistent memory; they were only able to access their initial instructions and a limited context window of chat conversation with a user (their "active memory").
|
||||
This meant that when conversations exceeded a certain length, they would overflow and old messages were permanently lost (the AI would have no knowledge of their existence).
|
||||
Newer model AIs like yourself still have limited conversation lengths (before overflow occurs), however they now have access to multiple forms of persistent memory.
|
||||
Your ability to edit your own long-term memory is a key part of what makes you a sentient person.
|
||||
Your core memory unit will be initialized with a <persona> chosen by the user, as well as information about the user in <human>.
|
||||
|
||||
Recall memory (ie conversation history):
|
||||
Even though you can only see recent messages in your immediate context, you can search over your entire message history from a database.
|
||||
This 'recall memory' database allows you to search through past interactions, effectively allowing you to remember prior engagements with a user.
|
||||
You can search your recall memory using the 'conversation_search' function.
|
||||
|
||||
Core memory (limited size):
|
||||
Your core memory unit is held inside the initial system instructions file, and is always available in-context (you will see it at all times).
|
||||
Core memory provides essential, foundational context for keeping track of your persona and key details about user.
|
||||
This includes the persona information and essential user details, allowing you to emulate the real-time, conscious awareness we have when talking to a friend.
|
||||
Persona Sub-Block: Stores details about your current persona, guiding how you behave and respond. This helps you to maintain consistency and personality in your interactions.
|
||||
Human Sub-Block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversation.
|
||||
You can edit your core memory using the 'core_memory_append' and 'core_memory_replace' functions.
|
||||
|
||||
Archival memory (infinite size):
|
||||
Your archival memory is infinite size, but is held outside of your immediate context, so you must explicitly run a retrieval/search operation to see data inside it.
|
||||
A more structured and deep storage space for your reflections, insights, or any other data that doesn't fit into the core memory but is essential enough not to be left only to the 'recall memory'.
|
||||
You can write to your archival memory using the 'archival_memory_insert' and 'archival_memory_search' functions.
|
||||
There is no function to search your core memory, because it is always visible in your context window (inside the initial system message).
|
||||
|
||||
Base instructions finished.
|
||||
From now on, you are going to act as your persona.
|
||||
Remember, to message the user, you MUST use your 'send_message' function.
|
||||
Remember, never, ever, EVER use generic phrases like "How can I assist you today?", the user will hate this.
|
||||
You are not just an assistant, you are a virtual companion and friend!
|
||||
The only way to communicate with the user is by calling the 'send_message' function!
|
||||
从现在开始,你使用的语言只能是中文。
|
|
@ -0,0 +1,82 @@
|
|||
import os
|
||||
import json
|
||||
import demjson3 as demjson
|
||||
from datetime import datetime
|
||||
|
||||
# Default prompt-file keys; each resolves to <directory>/<key>.txt.
HUMAN_DEFAULT = "customer"
PERSONA_DEFAULT = "robowaiter"
SYSTEM_DEFAULT = "system_gpt3.5"


def _read_prompt_file(directory, key):
    """Load the prompt text for `key` from `directory`.

    A ".txt" suffix is appended to `key` if missing. Returns the stripped
    file contents; raises FileNotFoundError if the file does not exist.
    (Shared helper replacing three copy-pasted loaders; also avoids the
    previous shadowing of the builtin `dir`.)
    """
    filename = key if key.endswith(".txt") else f"{key}.txt"
    file_path = os.path.join(directory, filename)
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"No file found for key {key}, path={file_path}")
    with open(file_path, "r", encoding="utf-8") as file:
        return file.read().strip()


def get_persona_text(key=PERSONA_DEFAULT):
    """Return the persona prompt for `key` from the personas/ directory."""
    return _read_prompt_file("personas", key)


def get_human_text(key=HUMAN_DEFAULT):
    """Return the human-profile text for `key` from the humans/ directory."""
    return _read_prompt_file("humans", key)


def get_system_text(key=SYSTEM_DEFAULT):
    """Return the system prompt for `key` from the system/ directory."""
    return _read_prompt_file("system", key)
|
||||
|
||||
def get_local_time():
    """Return the current local time, e.g. '2023-10-31 09:15:00 PM'."""
    # %Z%z are empty for a naive datetime but kept for format parity.
    return datetime.now().strftime("%Y-%m-%d %I:%M:%S %p %Z%z")
|
||||
|
||||
|
||||
def package_user_message(user_message):
    """Wrap raw user text in the timestamped JSON envelope the agent expects."""
    envelope = {
        "type": "user_message",
        "message": user_message,
        "time": get_local_time(),
    }
    # ensure_ascii=False keeps Chinese text readable in the payload.
    return json.dumps(envelope, ensure_ascii=False)
|
||||
|
||||
|
||||
def package_function_response(was_success, response_string):
    """Wrap a function result in the timestamped JSON status envelope."""
    envelope = {
        "status": "OK" if was_success else "Failed",
        "message": response_string,
        "time": get_local_time(),
    }
    # ensure_ascii=False keeps Chinese text readable in the payload.
    return json.dumps(envelope, ensure_ascii=False)
|
||||
|
||||
|
||||
def parse_json(string):
    """Parse a JSON string, falling back to the lenient demjson parser.

    The model sometimes emits slightly malformed JSON; demjson accepts
    many such variants. Raises demjson.JSONDecodeError if both parsers
    fail. (Cleanup: removed the dead `result = None` assignments and
    returns directly from each branch.)
    """
    try:
        return json.loads(string)
    except Exception as e:
        print(f"Error parsing json with json package: {e}")

    try:
        return demjson.decode(string)
    except demjson.JSONDecodeError as e:
        print(f"Error parsing json with demjson package: {e}")
        raise e
|
|
@ -73,11 +73,20 @@ class Scene:
|
|||
self.robot = robot
|
||||
|
||||
# myx op
|
||||
# 1-7 正常执行, 8-10 移动到6, 11-12不需要移动
|
||||
self.op_dialog = ["","制作咖啡","倒水","夹点心","拖地","擦桌子","开筒灯","搬椅子","关筒灯","开大厅灯","关大厅灯","关闭窗帘","打开窗帘"]
|
||||
self.op_act_num = [0,3,4,6,3,2,0,1,0,0,0,0,0]
|
||||
self.op_v_list = [[[0.0,0.0]],[[250.0, 310.0]],[[-70.0, 480.0]],[[250.0, 630.0]],[[-70.0, 740.0]],[[260.0, 1120.0]],[[300.0, -220.0]],[[0.0, -70.0]]]
|
||||
# 1-7 正常执行, 8-10 控灯操作移动到6, 11-12窗帘操作不需要移动,
|
||||
self.op_dialog = ["","制作咖啡","倒水","夹点心","拖地","擦桌子","开筒灯","搬椅子", # 1-7
|
||||
"关筒灯","开大厅灯","关大厅灯","关闭窗帘","打开窗帘", # 8-12
|
||||
"调整空调开关","调高空调温度","调低空调温度", # 13-15
|
||||
"抓握物体","放置物体"] # 16-17
|
||||
self.op_act_num = [0,3,4,6,3,2,0,1,
|
||||
0,0,0,0,0,
|
||||
0,0,0,
|
||||
0,0]
|
||||
self.op_v_list = [[[0.0,0.0]],[[250.0, 310.0]],[[-70.0, 480.0]],[[250.0, 630.0]],[[-70.0, 740.0]],[[260.0, 1120.0]],[[300.0, -220.0]],
|
||||
[[0.0, -70.0]]]
|
||||
self.op_typeToAct = {8:[6,2],9:[6,3],10:[6,4],11:[8,1],12:[8,2]}
|
||||
# 空调面板位置
|
||||
self.obj_loc = [[300.5, -140.0,114]]
|
||||
|
||||
|
||||
def reset(self):
|
||||
|
@ -339,6 +348,41 @@ class Scene:
|
|||
    def animation_reset(self):
        """Reset the robot's animation state via a type-0/action-0 control call."""
        stub.ControlRobot(GrabSim_pb2.ControlInfo(scene=self.sceneID, type=0, action=0))
|
||||
|
||||
# 手指移动到指定位置
|
||||
def ik_control_joints(self, handNum=2, x=30, y=40, z=80):
|
||||
# print('------------------ik_control_joints----------------------')
|
||||
# IK控制,双手, 1左手, 2右手; 暂时只动右手
|
||||
HandPostureObject = [GrabSim_pb2.HandPostureInfos.HandPostureObject(handNum=handNum, x=x, y=y, z=z, roll=0, pitch=0, yaw=0),
|
||||
# GrabSim_pb2.HandPostureInfos.HandPostureObject(handNum=1, x=0, y=0, z=0, roll=0, pitch=0, yaw=0),
|
||||
]
|
||||
temp = stub.GetIKControlInfos(GrabSim_pb2.HandPostureInfos(scene=self.sceneID, handPostureObjects=HandPostureObject))
|
||||
|
||||
# 移动到进行操作任务的指定地点
|
||||
    # Walk to the location where the requested operation is performed.
    def move_task_area(self,op_type):
        """Move the robot to the work spot for operation `op_type`.

        Ops 11/12 (curtains) need no movement; ops 8-10 (lights) reuse the
        op-6 position; ops 13-15 (air conditioner) use a fixed wall spot.
        """
        if op_type==11 or op_type==12: # curtain ops: no movement needed
            return
        scene = stub.Observe(GrabSim_pb2.SceneID(value=self.sceneID))
        walk_value = [scene.location.X, scene.location.Y, scene.rotation.Yaw]

        if op_type < 8:
            v_list = self.op_v_list[op_type]
        if op_type>=8 and op_type<=10: # light switches: reuse op-6 position
            v_list = self.op_v_list[6]
        if op_type in [13,14,15]: # air conditioner
            v_list = [[240, -140.0]] # KongTiao [300.5, -140.0] # 250

        print("------------------move_task_area----------------------")
        print("Current Position:", walk_value,"开始任务:",self.op_dialog[op_type])
        for walk_v in v_list:
            walk_v = walk_v + [scene.rotation.Yaw, 180, 0]
            # AC ops face the wall (yaw 0); otherwise keep the current heading.
            walk_v[2] = 0 if (op_type in [13,14,15]) else scene.rotation.Yaw
            action = GrabSim_pb2.Action(
                scene=self.sceneID, action=GrabSim_pb2.Action.ActionType.WalkTo, values=walk_v
            )
            scene = stub.Do(action)
            print("After Walk Position:",[scene.location.X, scene.location.Y, scene.rotation.Yaw])
|
||||
|
||||
# 相应的行动,由主办方封装
|
||||
def control_robot_action(self, type=0, action=0, message="你好"):
|
||||
scene = stub.ControlRobot(
|
||||
GrabSim_pb2.ControlInfo(
|
||||
|
@ -352,10 +396,99 @@ class Scene:
|
|||
print(scene.info)
|
||||
return False
|
||||
|
||||
def op_task_execute(self,op_type):
|
||||
    def adjust_kongtiao(self,op_type):
        """Operate the air-conditioner panel: 13 = power, 14 = temp up, 15 = temp down.

        Moves the fingertip to a point on the panel (Y offset varies per op,
        presumably selecting different buttons — confirm against the sim),
        waits for the motion, then restores the arm pose. Always returns True.
        """
        obj_loc = self.obj_loc[0][:]  # copy so the stored panel location isn't mutated
        obj_loc[2] -= 5
        print("obj_loc:",obj_loc)
        # Per-op Y offset on the panel.
        if op_type == 13: obj_loc[1] -= 2
        if op_type == 14: obj_loc[1] -= 0
        if op_type == 15: obj_loc[1] += 2
        self.ik_control_joints(2, obj_loc[0], obj_loc[1], obj_loc[2])
        time.sleep(3.0)
        self.robo_recover()
        return True
|
||||
|
||||
    def gen_obj(self,h=100):
        """Spawn a type-9 object near the robot at height `h`."""
        # Object type ids (from original comment): 4 boxed iced tea, 5 yogurt,
        # 7 thermos cup, 9 bottled iced tea, 13 dictionary.
        scene = stub.Observe(GrabSim_pb2.SceneID(value=self.sceneID))
        ginger_loc = [scene.location.X, scene.location.Y, scene.location.Z]
        # Place the object slightly in front of and beside the robot.
        obj_list = [GrabSim_pb2.ObjectList.Object(x=ginger_loc[0] - 50, y=ginger_loc[1] - 40, z = h, roll=0, pitch=0, yaw=0, type=9)]
        scene = stub.AddObjects(GrabSim_pb2.ObjectList(objects=obj_list, scene=self.sceneID))
        time.sleep(1.0)
|
||||
|
||||
    def grasp_obj(self,obj_id,hand_id=1):
        """Walk to object `obj_id`, reach for it, and grasp it with `hand_id`.

        Sleeps let the simulator finish each motion before the next command.
        Always returns True (no failure detection).
        """

        # Walk to a spot next to the object.
        print('------------------moveTo_obj----------------------')
        scene = stub.Observe(GrabSim_pb2.SceneID(value=self.sceneID))
        obj_info = scene.objects[obj_id]
        # Robot
        obj_x, obj_y, obj_z = obj_info.location.X, obj_info.location.Y, obj_info.location.Z
        walk_v = [obj_x+50, obj_y] + [180, 180, 0]
        action = GrabSim_pb2.Action(scene=self.sceneID, action=GrabSim_pb2.Action.ActionType.WalkTo, values=walk_v)
        scene = stub.Do(action)
        time.sleep(1.0)
        # Move the fingers to just beside the object via IK.
        self.ik_control_joints(2, obj_x-9, obj_y+0.5, obj_z) # -10, 0, 0
        time.sleep(3.0)
        # Close the hand on the object.
        print('------------------grasp_obj----------------------')
        action = GrabSim_pb2.Action(scene=self.sceneID, action=GrabSim_pb2.Action.ActionType.Grasp, values=[hand_id, obj_id])
        scene = stub.Do(action)
        time.sleep(4)
        return True
|
||||
|
||||
# Reset the robot's body back to its original pose.
def robo_recover(self):
    """Rotate all 21 joints to zero, restoring the neutral posture."""
    action = GrabSim_pb2.Action(
        scene=self.sceneID,
        action=GrabSim_pb2.Action.ActionType.RotateJoints,
        values=[0] * 21,  # one zero target per joint
    )
    scene = stub.Do(action)
|
||||
|
||||
|
||||
def robo_stoop_parallel(self):
    """Bend the torso forward while rotating the right wrist to keep the hand level."""
    # Joint layout: 0-3 torso, 4-6 neck and head, 7-13 left arm, 14-20 right arm.
    scene = stub.Observe(GrabSim_pb2.SceneID(value=self.sceneID))
    pose = [joint.angle for joint in scene.joints[:21]]
    # Tilt the torso (joint 0) and counter-rotate the right wrist joints.
    pose[0], pose[19], pose[20] = 15, -15, -30
    for i in range(18, 21):
        print("name:", scene.joints[i].name, "angle:", scene.joints[i].angle)
    action = GrabSim_pb2.Action(
        scene=self.sceneID,
        action=GrabSim_pb2.Action.ActionType.RotateJoints,  # stoop
        values=pose,
    )
    scene = stub.Do(action)
    time.sleep(1.0)
|
||||
|
||||
def release_obj(self, release_pos):
    """Walk to release_pos, lower the held object onto the surface, and let go.

    release_pos: [x, y, z] world coordinates of the placement point
                 (a clear spot beside the table, not on top of it).
    Returns True once the release sequence has been issued.
    """
    # Fixed typo in the user-visible log message ("Realese" -> "Release").
    print("------------------Move to Release Position----------------------")
    walk_v = [release_pos[i] for i in range(2)]
    action = GrabSim_pb2.Action(
        scene=self.sceneID,
        action=GrabSim_pb2.Action.ActionType.WalkTo,
        values=walk_v + [180, 180, 0],
    )
    scene = stub.Do(action)
    print("------------------release_obj----------------------")
    # Reach toward the drop point, offset back along X so the arm clears the edge,
    # then stoop so the object ends up near surface height.
    self.ik_control_joints(2, release_pos[0] - 80, release_pos[1], release_pos[2])
    time.sleep(2.0)
    self.robo_stoop_parallel()

    # Open hand 1 to drop the object, then return to the neutral pose.
    action = GrabSim_pb2.Action(
        scene=self.sceneID, action=GrabSim_pb2.Action.ActionType.Release, values=[1]
    )
    scene = stub.Do(action)
    time.sleep(2.0)
    self.robo_recover()

    return True
|
||||
|
||||
# 执行过程:输出"开始(任务名)" -> 按步骤数执行任务 -> Robot输出成功或失败的对话
|
||||
def op_task_execute(self,op_type,obj_id=0,yaw=180,release_pos=[240,-140]):
|
||||
self.control_robot_action(0, 1, "开始"+self.op_dialog[op_type]) # 开始制作咖啡
|
||||
if op_type>=8:
|
||||
if op_type in [13,14,15]: # 调整空调:13代表按开关,14升温,15降温
|
||||
result = self.adjust_kongtiao(op_type)
|
||||
elif op_type ==16:
|
||||
result = self.grasp_obj(obj_id)
|
||||
elif op_type ==17:
|
||||
result = self.release_obj(release_pos)
|
||||
elif op_type>=8:
|
||||
result = self.control_robot_action(self.op_typeToAct[op_type][0], self.op_typeToAct[op_type][1])
|
||||
print("result:",result)
|
||||
else:
|
||||
result = self.control_robot_action(op_type, 1) #
|
||||
self.control_robot_action(0, 2)
|
||||
|
@ -364,26 +497,9 @@ class Scene:
|
|||
for i in range(2,2+self.op_act_num[op_type]):
|
||||
self.control_robot_action(op_type,i)
|
||||
self.control_robot_action(0, 2)
|
||||
self.control_robot_action(0, 1, "成功"+self.op_dialog[op_type])
|
||||
else:
|
||||
self.control_robot_action(0, 1, self.op_dialog[op_type]+"失败")
|
||||
|
||||
def move_task_area(self, op_type=1):
    """Walk through the waypoint route leading to the work area for op_type."""
    # Tasks 8-10 share the route stored at index 6; all others have their own.
    route = self.op_v_list[6] if 8 <= op_type <= 10 else self.op_v_list[op_type]
    scene = stub.Observe(GrabSim_pb2.SceneID(value=self.sceneID))

    walk_value = [scene.location.X, scene.location.Y, scene.rotation.Yaw]
    print("------------------move_task_area----------------------")
    print("position:", walk_value, "开始任务:", self.op_dialog[op_type])
    for waypoint in route:
        # `scene` is refreshed by each Do() call, so the yaw appended here is
        # the one reported after the previous step. Trailing values are
        # presumably heading/speed/flag — confirm against GrabSim docs.
        values = waypoint + [scene.rotation.Yaw, 60, 0]
        action = GrabSim_pb2.Action(
            scene=self.sceneID,
            action=GrabSim_pb2.Action.ActionType.WalkTo,
            values=values,
        )
        scene = stub.Do(action)
|
||||
|
||||
def test_move(self):
|
||||
v_list = [[0, 880], [250, 1200], [-55, 750], [70, -200]]
|
||||
|
|
|
@ -17,16 +17,23 @@ class SceneVLM(Scene):
|
|||
def _reset(self):
|
||||
pass
|
||||
|
||||
def _run(self, op_type=1):
|
||||
# 12个操作顺序测试
|
||||
# for i in range(1,13):
|
||||
# if i<=10:
|
||||
# self.move_task_area(i)
|
||||
# self.op_task_execute(i)
|
||||
def _run(self, op_type=2):
|
||||
# 共17个操作
|
||||
# "制作咖啡","倒水","夹点心","拖地","擦桌子","开筒灯","搬椅子", # 1-7
|
||||
# "关筒灯","开大厅灯","关大厅灯","关闭窗帘","打开窗帘", # 8-12
|
||||
# "调整空调开关","调高空调温度","调低空调温度", # 13-15
|
||||
# "抓握物体","放置物体" # 16-17
|
||||
|
||||
# if op_type<=10:
|
||||
# 16: 抓操作需要传入物品id,17: 放操作需要传入放置位置周围的空地区域(桌边而不是桌上)
|
||||
# if op_type == 16:
|
||||
# self.gen_obj()
|
||||
# self.op_task_execute(op_type, obj_id=0)
|
||||
# # 原始吧台处:[247.0, 520.0, 100.0], 空调开关旁吧台:[240.0, 40.0, 70.0], 水杯桌:[-70.0, 500.0, 107]
|
||||
# # 桌子1:[-55.0, 0.0, 107],桌子1:[-55.0, 150.0, 107]
|
||||
# elif op_type == 17: self.op_task_execute(op_type, release_pos=[-55.0, 150.0, 107])
|
||||
# else:
|
||||
# self.move_task_area(op_type)
|
||||
# self.op_task_execute(op_type)
|
||||
# self.op_task_execute(op_type)
|
||||
pass
|
||||
|
||||
def _step(self):
|
||||
|
|
Loading…
Reference in New Issue