import ast
import os

import urllib3

from robowaiter.utils import get_root_path
from robowaiter.llm_client.single_round import single_round
from robowaiter.llm_client.tool_api import run_conversation

########################################
# This file implements simple communication with the large language model.
########################################

# Suppress urllib3 warnings about unverified HTTPS requests.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

root_path = get_root_path()

# Load the cached test questions so known queries can be answered
# without calling the LLM.
file_path = os.path.join(root_path, "robowaiter/llm_client/data/test_questions.txt")
with open(file_path, 'r', encoding="utf-8") as f:
    # The file is assumed to hold a plain Python dict literal;
    # ast.literal_eval parses it without executing arbitrary code.
    test_questions_dict = ast.literal_eval(f.read())
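
# A minimal sketch of the expected test_questions.txt contents (assumed
# format, inferred from the dict lookup in ask_llm below):
#
# {
#     "<question text>": "<canned answer>",
# }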


def ask_llm(question):
    """Answer from the test-question cache when possible; otherwise query the LLM."""
    if question in test_questions_dict:
        ans = test_questions_dict[question]
    else:
        ans = run_conversation(question, stream=False)
        # Alternative: single-round chat without tool calling.
        # ans = single_round(question)
    print(f"LLM output: {ans}")
    return ans
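
# Hypothetical streaming variant (assumes run_conversation can yield text
# chunks when stream=True; not verified against the tool_api implementation):
#
#     for chunk in run_conversation(question, stream=True):
#         print(chunk, end="", flush=True)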


if __name__ == '__main__':
    # Example query: "Test the VLM: make a cup of coffee."
    question = '''测试VLM:做一杯咖啡'''
    print(ask_llm(question))
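    # Sketch: exercise every cached question (each is a cache hit, so no LLM
    # calls are made):
    # for q in test_questions_dict:
    #     ask_llm(q)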