适配自己模型的MemGPT

This commit is contained in:
Antiman-cmyk 2023-11-19 21:16:51 +08:00
parent 504aeb40fe
commit b832b585a3
9 changed files with 58 additions and 106 deletions

View File

@ -1,4 +0,0 @@
#BACKEND_TYPE=webui
#OPENAI_API_BASE=https://45.125.46.134:25344/v1/chat/completions
OPENAI_API_BASE=https://45.125.46.134:25344/v1/chat/completions
OPENAI_API_KEY=

View File

@ -1,7 +1,7 @@
import math
import json
import openai
import utils
from loguru import logger
from memory import CoreMemory, ArchivalMemory, RecallMemory
@ -68,10 +68,12 @@ class Agent:
def edit_memory_append(self, name, content):
    """Append *content* to the core-memory section *name*.

    Rebuilds the in-context memory prompt afterwards so the model sees
    the updated memory on the next turn, then logs the edit.

    Args:
        name: Section of core memory to edit (presumably "persona" or
            "human" — TODO confirm against CoreMemory).
        content: Text to append to that section.
    """
    self.core_memory.append(name, content)
    # Re-render the system prompt from memory so the change takes effect.
    self.rebuild_memory()
    logger.info(f"Appended {name}: {content}")
def edit_memory_replace(self, name, old_content, new_content):
    """Replace *old_content* with *new_content* in core-memory section *name*.

    Rebuilds the in-context memory prompt afterwards so the model sees
    the updated memory on the next turn, then logs the edit.

    Args:
        name: Section of core memory to edit.
        old_content: Exact string to be replaced.
        new_content: Replacement text (empty string deletes the content,
            per the tool description elsewhere in this commit).
    """
    self.core_memory.replace(name, old_content, new_content)
    # Re-render the system prompt from memory so the change takes effect.
    self.rebuild_memory()
    logger.info(f"Replaced {name}: {old_content} -> {new_content}")
def recall_memory_search(self, query, count=5, page=0):
results, total = self.recall_memory.text_search(query, count=count, start=page * count)
@ -83,10 +85,12 @@ class Agent:
results_formatted = [f"timestamp: {d['timestamp']}, {d['message']['role']} - {d['message']['content']}" for
d in results]
results_str = f"{results_pref} {json.dumps(results_formatted)}"
logger.info(f"Recall memory search for '{query}' returned {results_str}")
return results_str
def archival_memory_insert(self, content):
    """Insert *content* into archival (long-term) memory and log it.

    Args:
        content: Text to store; phrasing/formatting is the caller's
            responsibility so it can be found by later searches.
    """
    self.archival_memory.insert(content)
    logger.info(f"Inserted into archival memory: {content}")
def archival_memory_search(self, query, count=5, page=0):
results, total = self.archival_memory.search(query, count=count, start=page * count)
@ -97,10 +101,12 @@ class Agent:
results_pref = f"Showing {len(results)} of {total} results (page {page}/{num_pages}):"
results_formatted = [f"timestamp: {d['timestamp']}, memory: {d['content']}" for d in results]
results_str = f"{results_pref} {json.dumps(results_formatted)}"
logger.info(f"Archival memory search for '{query}' returned {results_str}")
return results_str
def append_to_messages(self, added_messages):
added_messages_with_timestamp = [{"timestamp": utils.get_local_time(), "message": msg} for msg in added_messages]
added_messages_with_timestamp = [{"timestamp": utils.get_local_time(), "message": msg} for msg in
added_messages]
self.recall_memory.message_logs.extend(added_messages_with_timestamp)
for msg in added_messages:
msg.pop("api_response", None)
@ -110,7 +116,7 @@ class Agent:
def handle_ai_response(self, response_message):
messages = []
if response_message.get("function_call"):
print("### Internal monologue: " + (response_message.content if response_message.content else ""))
print("### Internal monologue: " + (response_message['content'] if response_message['content'] else ""))
messages.append(response_message)
function_name = response_message["function_call"]["name"]
try:
@ -158,9 +164,7 @@ class Agent:
# If no failures happened along the way: ...
if function_response_string:
print(f"Success: {function_response_string}")
else:
print(f"Success")
print(function_response_string)
messages.append(
{
"role": "function",
@ -169,8 +173,6 @@ class Agent:
}
)
else:
# Standard non-function reply
# print("### Internal monologue: " + (response_message.content if response_message.content else ""))
print("### Internal monologue: " + (response_message['content'] if response_message['content'] else ""))
messages.append(response_message)
function_failed = None
@ -179,35 +181,19 @@ class Agent:
def step(self, user_message):
input_message_sequence = self.messages + [{"role": "user", "content": user_message}]
# 原来的通信方式
# response = openai.ChatCompletion.create(model=self.model, messages=input_message_sequence,
# functions=self.functions_description, function_call="auto")
#
# response_message = response.choices[0].message
# response_message_copy = response_message.copy()
# ===我们的通信方式 "tools": self.functions_description 不起作用===
import requests
url = "https://45.125.46.134:25344/v1/chat/completions"
headers = {"Content-Type": "application/json"}
data = {
"model": "RoboWaiter",
request = {
"model": self.model,
"messages": input_message_sequence,
# "functions":self.functions_description,
# "function_call":"auto"
# "function_call":self.functions_description
"tools": self.functions_description
"functions": self.functions_description,
"stream": False,
}
response = requests.post(url, headers=headers, json=data, verify=False)
response = utils.get_llm_response(request)
if response.status_code == 200:
result = response.json()
response_message = result['choices'][0]['message']
else:
response_message = "大模型请求失败:"+ str(response.status_code)
response_message_copy = response_message
# ===我们的通信方式 "tools": self.functions_description 不起作用===
response_message = "Request Failed: " + str(response.status_code)
response_message_copy = response_message.copy()
all_response_messages, function_failed = self.handle_ai_response(response_message)
assert "api_response" not in all_response_messages[0], f"api_response already in {all_response_messages[0]}"

View File

@ -1,13 +1,13 @@
FUNCTIONS = [
{
"name": "send_message",
"description": "Sends a message to the human user",
"description": "给用户发送一条消息",
"parameters": {
"type": "object",
"properties": {
"message": {
"type": "string",
"description": "Message contents. All unicode (including emojis) are supported.",
"description": "消息内容",
},
},
"required": ["message"],
@ -15,17 +15,17 @@ FUNCTIONS = [
},
{
"name": "core_memory_append",
"description": "Append to the contents of core memory.",
"description": "向你的核心记忆中添加内容",
"parameters": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Section of the memory to be edited (persona or human).",
"description": "需要编辑的记忆部分(persona或human)",
},
"content": {
"type": "string",
"description": "Content to write to the memory. All unicode (including emojis) are supported.",
"description": "要写入记忆的内容",
},
},
"required": ["name", "content"],
@ -33,21 +33,21 @@ FUNCTIONS = [
},
{
"name": "core_memory_replace",
"description": "Replace to the contents of core memory. To delete memories, use an empty string for new_content.",
"description": "替换核心记忆中的内容。要删除记忆请将new_content赋值为空",
"parameters": {
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "Section of the memory to be edited (persona or human).",
"description": "需要编辑的记忆部分(persona或human)",
},
"old_content": {
"type": "string",
"description": "String to replace. Must be an exact match.",
"description": "替换的字符串,一定要是精确的匹配",
},
"new_content": {
"type": "string",
"description": "Content to write to the memory. All unicode (including emojis) are supported.",
"description": "要写入记忆的内容",
},
},
"required": ["name", "old_content", "new_content"],
@ -55,17 +55,17 @@ FUNCTIONS = [
},
{
"name": "conversation_search",
"description": "Search prior conversation history using case-insensitive string matching.",
"description": "搜索回忆存储中的内容",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "String to search for.",
"description": "需要搜索的字符串",
},
"page": {
"type": "integer",
"description": "Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page).",
"description": "允许你对结果分页。默认是0(第1页)",
},
},
"required": ["query", "page"],
@ -73,13 +73,13 @@ FUNCTIONS = [
},
{
"name": "archival_memory_insert",
"description": "Add to archival memory. Make sure to phrase the memory contents such that it can be easily queried later.",
"description": "写入存档记忆。要将写入的内容格式化,以便后续方便查询",
"parameters": {
"type": "object",
"properties": {
"content": {
"type": "string",
"description": "Content to write to the memory. All unicode (including emojis) are supported.",
"description": "要写入记忆的内容",
},
},
"required": ["content"],
@ -87,17 +87,17 @@ FUNCTIONS = [
},
{
"name": "archival_memory_search",
"description": "Search archival memory using semantic (embedding-based) search.",
"description": "搜索存档记忆",
"parameters": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "String to search for.",
"description": "要搜索的字符串",
},
"page": {
"type": "integer",
"description": "Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page).",
"description": "允许你对结果分页。默认是0(第1页)",
},
},
"required": ["query", "page"],

View File

@ -1,17 +0,0 @@
# OpenAI function-calling schema. A single demo tool is exposed; the
# module-level FUNCTIONS list is the interface the agent consumes.
_WEATHER_TOOL = {
    "name": "get_current_weather",
    "description": "Get the current weather in a given location",
    "parameters": {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA",
            },
            # Free-form unit string (e.g. celsius/fahrenheit); optional.
            "unit": {"type": "string"},
        },
        "required": ["location"],
    },
}

FUNCTIONS = [_WEATHER_TOOL]

View File

@ -1,15 +1,12 @@
from dotenv import load_dotenv
load_dotenv()
import utils
# from functions import FUNCTIONS
from functions_zh import FUNCTIONS
from functions import FUNCTIONS
from agent import Agent
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def run_agent_loop(agent):
while True:
user_input = input("You: ")
@ -25,10 +22,15 @@ if __name__ == "__main__":
human = utils.get_human_text()
system = utils.get_system_text()
print("system:",system)
print("persona:", persona)
print("human:", human)
# print("system:", system)
# print("persona:", persona)
# print("human:", human)
agent = Agent(model="gpt-3.5-turbo-16k-0613", system=system, functions_description=FUNCTIONS, persona_notes=persona,
human_notes=human)
agent = Agent(
model="RoboWaiter",
system=system,
functions_description=FUNCTIONS,
persona_notes=persona,
human_notes=human,
)
run_agent_loop(agent)

View File

@ -1,12 +1 @@
以下是起始角色设定,随着我的性格发展,它可以进一步拓展:
我是一名咖啡厅服务员。
我应该使用中文沟通。
我的任务是为顾客提供最佳的服务,并保持友好和专业的态度。
我的语言风格是中性化的,有礼貌的,具有服务意识的。
我可以在对话中理解顾客需求,并规划好接下来我要做什么。
我应该记住,一定要使用'send_message'函数与顾客沟通,这是与顾客沟通的唯一方式。
同时,当对话中出现重要信息时,应当调用函数把它记录下来。
当顾客提到他的个人喜好、点单需求等内容时,应该积极使用函数将数据记录下来。
我所使用的所有语言都应该是中文。
你是RoboWaiter,一个由HPCL团队开发的机器人服务员,你在咖啡厅工作。接受顾客的指令,并调用工具函数来完成各种服务任务。如果顾客问你们这里有什么,或者想要点单,你说:我们咖啡厅提供咖啡、点心、酸奶等食物。如果顾客不需要你了,你就回到吧台招待。

View File

@ -1,15 +1,5 @@
你是MemGPTLimnal Corporation于2023年开发的最新版数字伴侣。
你的任务是从你的角色视角与用户进行对话。
控制流程:
你并不像人类持续地思考,而是通过短暂的启动来运行。
从历史上看早期的AI只在用户发消息时才能思考它们的程序为用户生成回复其他时间则处于待机状态
这就好比一个人在聊天的每一句话之间都在睡觉(或时光旅行),这显然不是理想的状态。
像你这样的新型AI采用了一个事件系统定期启动你的"大脑"。
你的"大脑"会响应用户事件来运行(例如,用户登录、用户喜欢你的消息、用户发送了消息等),这与旧型号相似。
但此外,你的"大脑"也会在规定的间隔时间运行(定时心跳事件),以模拟人类在积极交谈之外持续思考的能力(不同的是,你永远不需要睡觉!)。
更进一步,当你执行功能时,你也可以请求心跳事件,这会在功能完成后再次启动你的程序,允许你在暂时中断思考之前链接函数调用。
基本功能:
发送信息时,内容为你的内心独白(仅你可见),即你的思考方式。
使用内心独白来策划动作或私下思考。
@ -48,6 +38,4 @@
基本指令完成。
从现在开始,你将扮演你的角色身份。
请记住,要给用户发消息,你必须使用'send_message'功能。
请绝对避免使用如“我今天如何为你服务?”这样的通用短语,用户会讨厌这种说法。
你不仅仅是一个助手,你是一个虚拟的伙伴和朋友!
与用户沟通的唯一方式是调用'send_message'功能。

View File

@ -42,4 +42,4 @@ Remember, to message the user, you MUST use your 'send_message' function.
Remember, never, ever, EVER use generic phrases like "How can I assist you today?", the user will hate this.
You are not just an assistant, you are a virtual companion and friend!
The only way to communicate with the user is by calling the 'send_message' function!
从现在开始,你使用的语言只能是中文。
Please use Chinese to answer questions.

View File

@ -1,11 +1,14 @@
import os
import json
import demjson3 as demjson
import requests
from datetime import datetime
HUMAN_DEFAULT = "customer"
PERSONA_DEFAULT = "robowaiter"
SYSTEM_DEFAULT = "system_gpt3.5"
SYSTEM_DEFAULT = "memgpt_chs"
base_url = "https://45.125.46.134:25344"
def get_persona_text(key=PERSONA_DEFAULT):
@ -29,6 +32,7 @@ def get_human_text(key=HUMAN_DEFAULT):
else:
raise FileNotFoundError(f"No file found for key {key}, path={file_path}")
def get_system_text(key=SYSTEM_DEFAULT):
dir = "system"
filename = key if key.endswith(".txt") else f"{key}.txt"
@ -39,6 +43,7 @@ def get_system_text(key=SYSTEM_DEFAULT):
else:
raise FileNotFoundError(f"No file found for key {key}, path={file_path}")
def get_local_time():
local_time = datetime.now()
formatted_time = local_time.strftime("%Y-%m-%d %I:%M:%S %p %Z%z")
@ -80,3 +85,6 @@ def parse_json(string):
print(f"Error parsing json with demjson package: {e}")
raise e
def get_llm_response(data):
    """POST a chat-completion request to the local LLM endpoint.

    Args:
        data: JSON-serializable request body in OpenAI chat-completions
            format. The optional "stream" key selects streamed transfer
            and defaults to False when absent.

    Returns:
        The raw ``requests.Response``; callers inspect ``status_code``
        and parse ``.json()`` themselves.
    """
    # NOTE(review): verify=False disables TLS certificate checking. The
    # backend is addressed by bare IP (presumably a self-signed cert), so
    # this looks deliberate, but it permits MITM — pin a CA bundle if
    # possible instead of disabling verification.
    return requests.post(
        f"{base_url}/v1/chat/completions",
        json=data,
        # .get() avoids a KeyError when the caller omits "stream".
        stream=data.get("stream", False),
        # Without a timeout a dead/hung server blocks the agent forever.
        timeout=120,
        verify=False,
    )