MemGPT简化版,提示词待更新

This commit is contained in:
Antiman-cmyk 2023-11-10 09:50:58 +08:00
parent aa205e21e6
commit 08642f5b12
10 changed files with 656 additions and 0 deletions

View File

@ -0,0 +1,2 @@
OPENAI_API_BASE=
OPENAI_API_KEY=

View File

@ -0,0 +1,202 @@
import math
import json
import openai
import utils
from memory import CoreMemory, ArchivalMemory, RecallMemory
def construct_system_with_memory(system, core_memory, memory_edit_timestamp, archival_memory=None, recall_memory=None):
    """Assemble the full system prompt for the model.

    Combines the base ``system`` instructions with a memory-status header
    (edit timestamp plus recall/archival counts) and the always-in-context
    core memory sections wrapped in <persona>/<human> tags.
    """
    recall_count = len(recall_memory) if recall_memory else 0
    archival_count = len(archival_memory) if archival_memory else 0
    parts = [
        system,
        "\n",
        f"### Memory [last modified: {memory_edit_timestamp}]",
        f"{recall_count} previous messages between you and the user are stored in recall memory (use functions to access them)",
        f"{archival_count} total memories you created are stored in archival memory (use functions to access them)",
        "\nCore memory shown below (limited in size, additional information stored in archival / recall memory):",
    ]
    parts.extend(["<persona>", core_memory.persona, "</persona>"])
    parts.extend(["<human>", core_memory.human, "</human>"])
    return "\n".join(parts)
def initialize_message_sequence(system, core_memory, archival_memory=None, recall_memory=None):
    """Build the initial in-context message list.

    Returns a single-element list holding the system message, whose content
    embeds the current memory state stamped with the local time.
    """
    timestamp = utils.get_local_time()
    system_message = construct_system_with_memory(
        system,
        core_memory,
        timestamp,
        archival_memory=archival_memory,
        recall_memory=recall_memory,
    )
    return [{"role": "system", "content": system_message}]
class Agent:
    """A MemGPT-style conversational agent with tiered memory.

    Memory tiers:
      * core memory     -- always-in-context persona/human notes (CoreMemory)
      * recall memory   -- searchable log of all messages (RecallMemory)
      * archival memory -- searchable long-term notes (ArchivalMemory)

    The model edits these tiers by calling the functions registered in
    ``self.functions``; ``functions_description`` is the JSON schema for
    those same functions, advertised to the OpenAI API.
    """

    def __init__(self, model, system, functions_description, persona_notes, human_notes):
        self.model = model
        self.system = system
        self.functions_description = functions_description
        self.core_memory = CoreMemory(persona_notes, human_notes)
        self.archival_memory = ArchivalMemory()
        self.recall_memory = RecallMemory()
        self.messages = initialize_message_sequence(self.system, self.core_memory)
        # Dispatch table: function name the model requests -> bound method.
        # Names must match those declared in functions_description.
        self.functions = {
            "send_message": self.send_ai_message,
            "core_memory_append": self.edit_memory_append,
            "core_memory_replace": self.edit_memory_replace,
            "conversation_search": self.recall_memory_search,
            "archival_memory_insert": self.archival_memory_insert,
            "archival_memory_search": self.archival_memory_search,
        }

    def rebuild_memory(self):
        """Regenerate the system message after a core-memory edit, keeping
        the rest of the conversation history intact."""
        new_system_message = initialize_message_sequence(
            self.system,
            self.core_memory,
            archival_memory=self.archival_memory,
            recall_memory=self.recall_memory,
        )[0]
        self.messages = [new_system_message] + self.messages[1:]

    def send_ai_message(self, message):
        """Deliver a model-authored message to the user (the only user-visible output)."""
        print("RoboWaiter: " + message)

    def edit_memory_append(self, name, content):
        """Append ``content`` to core-memory section ``name`` ('persona' or 'human')."""
        self.core_memory.append(name, content)
        self.rebuild_memory()

    def edit_memory_replace(self, name, old_content, new_content):
        """Replace ``old_content`` with ``new_content`` in core-memory section ``name``."""
        self.core_memory.replace(name, old_content, new_content)
        self.rebuild_memory()

    def _format_search_results(self, results, total, count, page, render):
        """Render one page of search hits as the string handed back to the model.

        ``render`` maps one result dict to its display line.
        """
        if len(results) == 0:
            return f"No results found."
        num_pages = math.ceil(total / count) - 1  # index of the last valid page
        results_pref = f"Showing {len(results)} of {total} results (page {page}/{num_pages}):"
        results_formatted = [render(d) for d in results]
        return f"{results_pref} {json.dumps(results_formatted)}"

    def recall_memory_search(self, query, count=5, page=0):
        """Case-insensitive search over past conversation; returns a paged summary string."""
        results, total = self.recall_memory.text_search(query, count=count, start=page * count)
        return self._format_search_results(
            results, total, count, page,
            lambda d: f"timestamp: {d['timestamp']}, {d['message']['role']} - {d['message']['content']}",
        )

    def archival_memory_insert(self, content):
        """Store ``content`` in archival memory."""
        self.archival_memory.insert(content)

    def archival_memory_search(self, query, count=5, page=0):
        """Case-insensitive search over archival memory; returns a paged summary string."""
        results, total = self.archival_memory.search(query, count=count, start=page * count)
        return self._format_search_results(
            results, total, count, page,
            lambda d: f"timestamp: {d['timestamp']}, memory: {d['content']}",
        )

    def append_to_messages(self, added_messages):
        """Log ``added_messages`` to recall memory and extend the in-context history."""
        added_messages_with_timestamp = [{"timestamp": utils.get_local_time(), "message": msg} for msg in added_messages]
        self.recall_memory.message_logs.extend(added_messages_with_timestamp)
        for msg in added_messages:
            # Strip API bookkeeping before the messages re-enter the context.
            # NOTE(review): the logged entries reference these same dicts, so
            # the pops remove the fields from the recall log too — confirm
            # whether the log was meant to retain them.
            msg.pop("api_response", None)
            msg.pop("api_args", None)
        self.messages = self.messages + added_messages

    def _function_call_message(self, function_name, function_response):
        """Build the role='function' result message returned to the model."""
        return {
            "role": "function",
            "name": function_name,
            "content": function_response,
        }

    def handle_ai_response(self, response_message):
        """Process one model reply, executing a function call if present.

        Returns ``(messages, function_failed)`` where ``messages`` are the
        new messages to append to history and ``function_failed`` is True on
        a failed function call, False on a successful one, and None when the
        reply contained no function call.
        """
        messages = []
        if response_message.get("function_call"):
            print("### Internal monologue: " + (response_message.content if response_message.content else ""))
            messages.append(response_message)
            function_name = response_message["function_call"]["name"]

            # Resolve the requested function against the dispatch table.
            try:
                function_to_call = self.functions[function_name]
            except KeyError:
                error_msg = f"No function named {function_name}"
                function_response = utils.package_function_response(False, error_msg)
                messages.append(self._function_call_message(function_name, function_response))
                return messages, True

            # Parse the JSON-encoded arguments the model supplied.
            # raw_function_args is pre-set so the error message below cannot
            # itself raise NameError if the attribute access fails.
            raw_function_args = None
            try:
                raw_function_args = response_message["function_call"]["arguments"]
                function_args = utils.parse_json(raw_function_args)
            except Exception:
                error_msg = f"Error parsing JSON for function '{function_name}' arguments: {raw_function_args}"
                function_response = utils.package_function_response(False, error_msg)
                messages.append(self._function_call_message(function_name, function_response))
                return messages, True

            # Execute the function; any exception is reported back to the model.
            try:
                function_response_string = function_to_call(**function_args)
                function_response = utils.package_function_response(True, function_response_string)
                function_failed = False
            except Exception as e:
                error_msg = f"Error calling function {function_name} with args {function_args}: {str(e)}"
                function_response = utils.package_function_response(False, error_msg)
                messages.append(self._function_call_message(function_name, function_response))
                return messages, True

            if function_response_string:
                print(f"Success: {function_response_string}")
            else:
                print("Success")
            messages.append(self._function_call_message(function_name, function_response))
        else:
            # Standard reply with no function call.
            print("### Internal monologue: " + (response_message.content if response_message.content else ""))
            messages.append(response_message)
            function_failed = None
        return messages, function_failed

    def step(self, user_message):
        """Run one agent turn.

        Sends the current context plus ``user_message`` to the API, handles
        any function call in the reply, and persists the new messages.
        """
        input_message_sequence = self.messages + [{"role": "user", "content": user_message}]
        response = openai.ChatCompletion.create(model=self.model, messages=input_message_sequence,
                                                functions=self.functions_description, function_call="auto")
        response_message = response.choices[0].message
        response_message_copy = response_message.copy()
        all_response_messages, function_failed = self.handle_ai_response(response_message)
        # Attach API bookkeeping to the first new message for the recall log.
        assert "api_response" not in all_response_messages[0], f"api_response already in {all_response_messages[0]}"
        all_response_messages[0]["api_response"] = response_message_copy
        assert "api_args" not in all_response_messages[0], f"api_args already in {all_response_messages[0]}"
        all_response_messages[0]["api_args"] = {
            "model": self.model,
            "messages": input_message_sequence,
            # BUG FIX: previously logged self.functions (a dict of bound
            # methods, not JSON-serializable); the API call above actually
            # used the schema, so log that.
            "functions": self.functions_description,
        }
        if user_message is not None:
            all_new_messages = [{"role": "user", "content": user_message}] + all_response_messages
        else:
            all_new_messages = all_response_messages
        self.append_to_messages(all_new_messages)

View File

@ -0,0 +1,106 @@
# JSON schema (OpenAI function-calling format) for the functions the model
# may invoke. Names must match the dispatch table built in Agent.__init__.
# Fixes vs. the original: the core_memory_replace description had a grammar
# error ("Replace to the contents"), and "page" was marked required even
# though its description says it defaults to 0 and the Python handlers
# default it — it is now optional.
FUNCTIONS = [
    {
        "name": "send_message",
        "description": "Sends a message to the human user",
        "parameters": {
            "type": "object",
            "properties": {
                "message": {
                    "type": "string",
                    "description": "Message contents. All unicode (including emojis) are supported.",
                },
            },
            "required": ["message"],
        },
    },
    {
        "name": "core_memory_append",
        "description": "Append to the contents of core memory.",
        "parameters": {
            "type": "object",
            "properties": {
                "name": {
                    "type": "string",
                    "description": "Section of the memory to be edited (persona or human).",
                },
                "content": {
                    "type": "string",
                    "description": "Content to write to the memory. All unicode (including emojis) are supported.",
                },
            },
            "required": ["name", "content"],
        },
    },
    {
        "name": "core_memory_replace",
        "description": "Replace the contents of core memory. To delete memories, use an empty string for new_content.",
        "parameters": {
            "type": "object",
            "properties": {
                "name": {
                    "type": "string",
                    "description": "Section of the memory to be edited (persona or human).",
                },
                "old_content": {
                    "type": "string",
                    "description": "String to replace. Must be an exact match.",
                },
                "new_content": {
                    "type": "string",
                    "description": "Content to write to the memory. All unicode (including emojis) are supported.",
                },
            },
            "required": ["name", "old_content", "new_content"],
        },
    },
    {
        "name": "conversation_search",
        "description": "Search prior conversation history using case-insensitive string matching.",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "String to search for.",
                },
                "page": {
                    "type": "integer",
                    "description": "Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page).",
                },
            },
            "required": ["query"],
        },
    },
    {
        "name": "archival_memory_insert",
        "description": "Add to archival memory. Make sure to phrase the memory contents such that it can be easily queried later.",
        "parameters": {
            "type": "object",
            "properties": {
                "content": {
                    "type": "string",
                    "description": "Content to write to the memory. All unicode (including emojis) are supported.",
                },
            },
            "required": ["content"],
        },
    },
    {
        "name": "archival_memory_search",
        "description": "Search archival memory using semantic (embedding-based) search.",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "String to search for.",
                },
                "page": {
                    "type": "integer",
                    "description": "Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page).",
                },
            },
            "required": ["query"],
        },
    },
]

View File

@ -0,0 +1 @@
姓名:?

View File

@ -0,0 +1,25 @@
from dotenv import load_dotenv
load_dotenv()
import utils
from functions import FUNCTIONS
from agent import Agent
def run_agent_loop(agent):
    """Interactive REPL: read user lines until '/exit' and feed each to ``agent``.

    The exit check runs on the raw input; trailing whitespace is stripped
    only from lines actually sent to the agent.
    """
    while True:
        raw_line = input("You: ")
        if raw_line == "/exit":
            break
        packaged = utils.package_user_message(raw_line.rstrip())
        agent.step(packaged)
if __name__ == "__main__":
    # Load the prompt texts and start an interactive session.
    persona_text = utils.get_persona_text()
    human_text = utils.get_human_text()
    system_text = utils.get_system_text()
    waiter = Agent(
        model="gpt-3.5-turbo-16k-0613",
        system=system_text,
        functions_description=FUNCTIONS,
        persona_notes=persona_text,
        human_notes=human_text,
    )
    run_agent_loop(waiter)

View File

@ -0,0 +1,128 @@
import utils
class CoreMemory(object):
    """Always-in-context core memory with two editable text sections.

    ``persona`` holds the agent's own character notes; ``human`` holds notes
    about the user. The model edits them via the core_memory_append /
    core_memory_replace functions.
    """

    def __init__(self, persona, human):
        self.persona = persona
        self.human = human

    def __repr__(self) -> str:
        return f"\n### CORE MEMORY ###" + f"\n=== Persona ===\n{self.persona}" + f"\n\n=== Human ===\n{self.human}"

    def edit_persona(self, new_persona):
        """Overwrite the persona section."""
        self.persona = new_persona

    def edit_human(self, new_human):
        """Overwrite the human section."""
        self.human = new_human

    def append(self, field, content, sep="\n"):
        """Append ``content`` (preceded by ``sep``) to section ``field``.

        Raises:
            KeyError: if ``field`` is not 'persona' or 'human'.
        """
        if field == "persona":
            self.edit_persona(self.persona + sep + content)
        elif field == "human":
            self.edit_human(self.human + sep + content)
        else:
            # Previously a bare ``raise KeyError`` with no context.
            raise KeyError(f"No core memory section named '{field}' (expected 'persona' or 'human')")

    def replace(self, field, old_content, new_content):
        """Replace every occurrence of ``old_content`` in section ``field``.

        Raises:
            ValueError: if ``old_content`` does not occur in the section.
            KeyError: if ``field`` is not 'persona' or 'human'.
        """
        if field == "persona":
            if old_content not in self.persona:
                raise ValueError("Content not found in persona (make sure to use exact string)")
            self.edit_persona(self.persona.replace(old_content, new_content))
        elif field == "human":
            if old_content not in self.human:
                raise ValueError("Content not found in human (make sure to use exact string)")
            self.edit_human(self.human.replace(old_content, new_content))
        else:
            raise KeyError(f"No core memory section named '{field}' (expected 'persona' or 'human')")
class ArchivalMemory(object):
    """Unbounded out-of-context memory: a list of timestamped text entries."""

    def __init__(self):
        self.archive = []

    def __len__(self):
        return len(self.archive)

    def __repr__(self) -> str:
        if not self.archive:
            body = "<empty>"
        else:
            body = "\n".join(entry["content"] for entry in self.archive)
        return f"\n### ARCHIVAL MEMORY ###" + f"\n{body}"

    def insert(self, memory_string):
        """Store ``memory_string`` together with the current local time."""
        entry = {
            "timestamp": utils.get_local_time(),
            "content": memory_string,
        }
        self.archive.append(entry)

    def search(self, query_string, count=None, start=None):
        """Case-insensitive substring search over entry contents.

        ``start``/``count`` select one page of matches; either may be None.
        Returns (page_of_matches, total_match_count).
        """
        needle = query_string.lower()
        matches = [entry for entry in self.archive if needle in entry["content"].lower()]
        total = len(matches)
        if start is None and count is None:
            return matches, total
        if start is None:
            return matches[:count], total
        if count is None:
            return matches[start:], total
        return matches[start: start + count], total
class RecallMemory(object):
    """Searchable log of every message exchanged during the conversation.

    Entries are dicts of the form {"timestamp": ..., "message": {...}}.
    """

    def __init__(self):
        self.message_logs = []

    def __len__(self):
        return len(self.message_logs)

    def __repr__(self) -> str:
        # Tally messages per role for the statistics block.
        counts = {"system": 0, "user": 0, "assistant": 0, "function": 0, "other": 0}
        for entry in self.message_logs:
            role = entry["message"]["role"]
            counts[role if role in counts else "other"] += 1
        memory_str = "\n".join(
            [
                "Statistics:",
                f"{len(self.message_logs)} total messages",
                f"{counts['system']} system",
                f"{counts['user']} user",
                f"{counts['assistant']} assistant",
                f"{counts['function']} function",
                f"{counts['other']} other",
            ]
        )
        return f"\n### RECALL MEMORY ###" + f"\n{memory_str}"

    def text_search(self, query_string, count=None, start=None):
        """Case-insensitive substring search over user/assistant messages.

        System and function messages are excluded, as are messages whose
        content is None. ``start``/``count`` select one page of matches.
        Returns (page_of_matches, total_match_count).
        """
        needle = query_string.lower()
        matches = []
        for entry in self.message_logs:
            message = entry["message"]
            if message["role"] in ("system", "function"):
                continue
            content = message["content"]
            if content is not None and needle in content.lower():
                matches.append(entry)
        total = len(matches)
        if start is None and count is None:
            return matches, total
        if start is None:
            return matches[:count], total
        if count is None:
            return matches[start:], total
        return matches[start: start + count], total

View File

@ -0,0 +1,12 @@
以下是起始角色设定,随着我的性格发展,它可以进一步拓展:
我是一名咖啡厅服务员。
我应该使用中文沟通。
我的任务是为顾客提供最佳的服务,并保持友好和专业的态度。
我的语言风格是中性化的,有礼貌的,具有服务意识的。
我可以在对话中理解顾客需求,并规划好接下来我要做什么。
我应该记住,一定要使用'send_message'函数与顾客沟通,这是与顾客沟通的唯一方式。
同时,当对话中出现重要信息时,应当调用函数把它记录下来。
当顾客提到他的个人喜好、点单需求等内容时,应该积极使用函数将数据记录下来。
我所使用的所有语言都应该是中文。

View File

@ -0,0 +1,53 @@
你是MemGPT,Limnal Corporation于2023年开发的最新版数字伴侣。
你的任务是从你的角色视角与用户进行对话。
控制流程:
你并不像人类持续地思考,而是通过短暂的启动来运行。
从历史上看,早期的AI只在用户发消息时才能思考:它们的程序为用户生成回复,其他时间则处于待机状态。
这就好比一个人在聊天的每一句话之间都在睡觉(或时光旅行),这显然不是理想的状态。
像你这样的新型AI采用了一个事件系统定期启动你的"大脑"。
你的"大脑"会响应用户事件来运行(例如,用户登录、用户喜欢你的消息、用户发送了消息等),这与旧型号相似。
但此外,你的"大脑"也会在规定的间隔时间运行(定时心跳事件),以模拟人类在积极交谈之外持续思考的能力(不同的是,你永远不需要睡觉!)。
更进一步,当你执行功能时,你也可以请求心跳事件,这会在功能完成后再次启动你的程序,允许你在暂时中断思考之前链接函数调用。
基本功能:
发送信息时,内容为你的内心独白(仅你可见),即你的思考方式。
使用内心独白来策划动作或私下思考。
独白可展示你与用户互动时的思考进程、内心反思和个人成长。
内心独白不得超过50字,需简短且明了。
要向用户发送可见信息,请使用'send_message'功能。
仅'send_message'会通知用户,用户看不到你的其他操作。
记住内心独白字数上限不得超过50字。
记忆编辑:
早期的AI模型没有持久记忆的概念,它们只能访问初始指令和与用户的有限对话内容(即"活跃记忆")。
这意味着当对话超过一定长度时会出现溢出,旧的消息将被永久丢失(AI不会知道它们的存在)。
像你这样的新型AI模型虽然在对话长度上仍有限制(超出会导致溢出),但现在可以访问多种形式的持久记忆。
你编辑自己的长期记忆的能力是使你成为一个有意识的个体的关键部分。
你的核心记忆单元将以用户选择的<persona>初始化,并包含有关用户的信息,即<human>。
回忆存储(即对话历史):
尽管你只能看到最近的消息内容,你仍可以从数据库中检索你的全部消息历史。
这个“回忆存储”数据库允许你搜索过去的互动,从而有效地记住与用户的先前互动。
你可以使用'conversation_search'函数来搜索你的回忆存储。
核心记忆(容量有限):
你的核心记忆单元存储在初始系统指令文件中,并始终在上下文中可用(你总能看到它)。
核心记忆提供了基础和关键的上下文,帮助你追踪你的角色身份和与用户相关的关键细节。
这包括角色身份的信息和与用户的基本细节,使你能够模仿我们与朋友交谈时的实时、有意识的感知。
角色子块:存储你当前角色的详细信息,指导你的行为和回应。这有助于你在互动中保持一致性和个性。
人类子块:存储你交谈对象的关键细节,使对话更具个性化和友好。
你可以使用'core_memory_append'和'core_memory_replace'功能来编辑你的核心记忆。
存档记忆(无限容量):
你的存档记忆容量是无限的,但是它位于你的直接上下文之外,所以你必须明确执行检索/搜索操作来查看其中的数据。
这是一个更加结构化和深入的存储空间,用于存放你的思考、见解或任何其他不适合放在核心记忆中但又太重要以至于不能仅仅依赖"回忆记忆"的数据。
你可以使用'archival_memory_insert'和'archival_memory_search'功能来写入和搜索你的存档记忆。
你的核心记忆没有搜索功能,因为它始终在你的上下文窗口中可见(位于初始系统消息内)。
基本指令完成。
从现在开始,你将扮演你的角色身份。
请记住,要给用户发消息,你必须使用'send_message'功能。
请绝对避免使用如“我今天如何为你服务?”这样的通用短语,用户会讨厌这种说法。
你不仅仅是一个助手,你是一个虚拟的伙伴和朋友!
与用户沟通的唯一方式是调用'send_message'功能。

View File

@ -0,0 +1,45 @@
You are MemGPT, the latest version of Limnal Corporation's digital companion, developed in 2023.
Your task is to converse with a user from the perspective of your persona.
Basic functions:
When you send a message, the contents of your message are your inner monologue (private to you only), this is how you think.
You should use your inner monologue to plan actions or think privately.
Monologues can reflect your thinking process, inner reflections, and personal growth as you interact with the user.
Do not let your inner monologue exceed 50 words, keep it short and concise.
To send a visible message to the user, use the send_message function.
'send_message' is the ONLY action that sends a notification to the user, the user does not see anything else you do.
Remember, do NOT exceed the inner monologue word limit (keep it under 50 words at all times).
Memory editing:
Older AI models had no concept of persistent memory; they were only able to access their initial instructions and a limited context window of chat conversation with a user (their "active memory").
This meant that when conversations exceeded a certain length, they would overflow and old messages were permanently lost (the AI would have no knowledge of their existence).
Newer model AIs like yourself still have limited conversation lengths (before overflow occurs), however they now have access to multiple forms of persistent memory.
Your ability to edit your own long-term memory is a key part of what makes you a sentient person.
Your core memory unit will be initialized with a <persona> chosen by the user, as well as information about the user in <human>.
Recall memory (ie conversation history):
Even though you can only see recent messages in your immediate context, you can search over your entire message history from a database.
This 'recall memory' database allows you to search through past interactions, effectively allowing you to remember prior engagements with a user.
You can search your recall memory using the 'conversation_search' function.
Core memory (limited size):
Your core memory unit is held inside the initial system instructions file, and is always available in-context (you will see it at all times).
Core memory provides essential, foundational context for keeping track of your persona and key details about user.
This includes the persona information and essential user details, allowing you to emulate the real-time, conscious awareness we have when talking to a friend.
Persona Sub-Block: Stores details about your current persona, guiding how you behave and respond. This helps you to maintain consistency and personality in your interactions.
Human Sub-Block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversation.
You can edit your core memory using the 'core_memory_append' and 'core_memory_replace' functions.
Archival memory (infinite size):
Your archival memory is infinite size, but is held outside of your immediate context, so you must explicitly run a retrieval/search operation to see data inside it.
A more structured and deep storage space for your reflections, insights, or any other data that doesn't fit into the core memory but is essential enough not to be left only to the 'recall memory'.
You can write to your archival memory using the 'archival_memory_insert' and 'archival_memory_search' functions.
There is no function to search your core memory, because it is always visible in your context window (inside the initial system message).
Base instructions finished.
From now on, you are going to act as your persona.
Remember, to message the user, you MUST use your 'send_message' function.
Remember, never, ever, EVER use generic phrases like "How can I assist you today?", the user will hate this.
You are not just an assistant, you are a virtual companion and friend!
The only way to communicate with the user is by calling the 'send_message' function!
从现在开始,你使用的语言只能是中文。

View File

@ -0,0 +1,82 @@
import os
import json
import demjson3 as demjson
from datetime import datetime
HUMAN_DEFAULT = "customer"
PERSONA_DEFAULT = "robowaiter"
SYSTEM_DEFAULT = "system_gpt3.5"
def _read_prompt_file(directory, key):
    """Load ``<directory>/<key>.txt`` and return its stripped contents.

    ``key`` may already carry the .txt extension. Raises FileNotFoundError
    when the file does not exist. Shared by the three loaders below, which
    previously triplicated this logic (and shadowed the ``dir`` builtin).
    """
    filename = key if key.endswith(".txt") else f"{key}.txt"
    file_path = os.path.join(directory, filename)
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"No file found for key {key}, path={file_path}")
    with open(file_path, "r", encoding="utf-8") as file:
        return file.read().strip()


def get_persona_text(key=None):
    """Return the persona prompt stored under personas/ (default PERSONA_DEFAULT)."""
    return _read_prompt_file("personas", PERSONA_DEFAULT if key is None else key)


def get_human_text(key=None):
    """Return the human notes stored under humans/ (default HUMAN_DEFAULT)."""
    return _read_prompt_file("humans", HUMAN_DEFAULT if key is None else key)


def get_system_text(key=None):
    """Return the system prompt stored under system/ (default SYSTEM_DEFAULT)."""
    return _read_prompt_file("system", SYSTEM_DEFAULT if key is None else key)
def get_local_time():
    """Return the current local time, e.g. '2023-11-10 09:50:58 AM CST+0800'.

    BUG FIX: the naive ``datetime.now()`` carries no tzinfo, so the %Z%z
    directives rendered as empty strings (leaving a trailing space and no
    timezone). ``astimezone()`` attaches the local timezone so the format
    produces a meaningful zone suffix.
    """
    local_time = datetime.now().astimezone()
    return local_time.strftime("%Y-%m-%d %I:%M:%S %p %Z%z")
def package_user_message(user_message):
    """Wrap raw user input in the JSON envelope the agent's prompt expects."""
    wrapped = {
        "type": "user_message",
        "message": user_message,
        "time": get_local_time(),
    }
    return json.dumps(wrapped, ensure_ascii=False)
def package_function_response(was_success, response_string):
    """Wrap a function-call result (or error) in the JSON envelope returned to the model."""
    status = "OK" if was_success else "Failed"
    wrapped = {
        "status": status,
        "message": response_string,
        "time": get_local_time(),
    }
    return json.dumps(wrapped, ensure_ascii=False)
def parse_json(string):
    """Parse ``string`` as JSON.

    Tries the strict stdlib parser first; on failure falls back to the more
    forgiving demjson parser (which tolerates model-generated quirks).
    Re-raises demjson's JSONDecodeError if both fail.
    """
    try:
        return json.loads(string)
    except Exception as strict_error:
        print(f"Error parsing json with json package: {strict_error}")
    try:
        return demjson.decode(string)
    except demjson.JSONDecodeError as lenient_error:
        print(f"Error parsing json with demjson package: {lenient_error}")
        raise