vllm code submission

This commit is contained in:
parent 4fc13ed714
commit 607be33781
@@ -25,6 +25,10 @@ curl -X POST "http://127.0.0.1:8101/v1/completions" \
   -H "Content-Type: application/json" \
   -d "{\"model\": \"THUDM/chatglm3-6b\",\"prompt\": \"你叫什么名字\", \"history\": [{\"role\": \"user\", \"content\": \"你出生在哪里.\"}, {\"role\": \"assistant\", \"content\": \"出生在北京\"}]}"
 
+Multi-turn conversation:
+
+curl -X POST "http://127.0.0.1:8101/v1/chat/completions" \
+  -H "Content-Type: application/json" \
+  -d "{\"model\": \"THUDM/chatglm3-6b\", \"messages\": [{\"role\": \"system\", \"content\": \"You are ChatGLM3, a large language model trained by Zhipu.AI. Follow the user's instructions carefully. Respond using markdown.\"}, {\"role\": \"user\", \"content\": \"你好,给我讲一个故事,大概100字\"}], \"stream\": false, \"max_tokens\": 100, \"temperature\": 0.8, \"top_p\": 0.8}"
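For reference, a minimal Python sketch of the same multi-turn request, assuming the server from the docs above at 127.0.0.1:8101 and the requests library (illustrative, not part of the commit):

import requests

# Illustrative only: call the documented chat/completions endpoint
# the same way the curl example above does.
url = "http://127.0.0.1:8101/v1/chat/completions"
payload = {
    "model": "THUDM/chatglm3-6b",
    "messages": [
        {"role": "system", "content": "You are ChatGLM3, a large language model trained by Zhipu.AI."},
        {"role": "user", "content": "你好,给我讲一个故事,大概100字"},
    ],
    "stream": False,
    "max_tokens": 100,
    "temperature": 0.8,
    "top_p": 0.8,
}
resp = requests.post(url, json=payload, timeout=60)
print(resp.json()["choices"][0]["message"]["content"])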
llm/VllmGPT.py (105 changed lines)
@@ -2,37 +2,80 @@ import json
 import requests
 # from core import content_db
 
 class VllmGPT:
 
-    def question(cont):
-        chat_list = []
-        # contentdb = content_db.new_instance()
-        # list = contentdb.get_list('all','desc',11)
-        # answer_info = dict()
-        # chat_list = []
-        # i = len(list)-1
-        # while i >= 0:
-        #     answer_info = dict()
-        #     if list[i][0] == "member":
-        #         answer_info["role"] = "user"
-        #         answer_info["content"] = list[i][2]
-        #     elif list[i][0] == "fay":
-        #         answer_info["role"] = "bot"
-        #         answer_info["content"] = list[i][2]
-        #     chat_list.append(answer_info)
-        #     i -= 1
-        content = {
-            "model": "THUDM/chatglm3-6b",
-            "prompt": "请简单回复我。" + cont,
-            "history": chat_list}
-        url = "http://192.168.1.3:8101/v1/completions"
-        req = json.dumps(content)
-
-        headers = {'content-type': 'application/json'}
-        r = requests.post(url, headers=headers, data=req)
-        res = json.loads(r.text)
-
-        return res['choices'][0]['text']
+    def __init__(self, host="127.0.0.1",
+                 port="8000",
+                 model="THUDM/chatglm3-6b",
+                 max_tokens="1024"):
+        self.host = host
+        self.port = port
+        self.model = model
+        self.max_tokens = max_tokens
+        # Completions endpoint (single-turn) and chat/completions endpoint (multi-turn).
+        self.__URL = "http://{}:{}/v1/completions".format(self.host, self.port)
+        self.__URL2 = "http://{}:{}/v1/chat/completions".format(self.host, self.port)
+
+    def question(self, cont):
+        # The chat history is currently left empty; the commented-out code shows
+        # how it could be rebuilt from content_db.
+        chat_list = []
+        # contentdb = content_db.new_instance()
+        # list = contentdb.get_list('all','desc',11)
+        # answer_info = dict()
+        # chat_list = []
+        # i = len(list)-1
+        # while i >= 0:
+        #     answer_info = dict()
+        #     if list[i][0] == "member":
+        #         answer_info["role"] = "user"
+        #         answer_info["content"] = list[i][2]
+        #     elif list[i][0] == "fay":
+        #         answer_info["role"] = "bot"
+        #         answer_info["content"] = list[i][2]
+        #     chat_list.append(answer_info)
+        #     i -= 1
+        content = {
+            "model": self.model,
+            "prompt": "请简单回复我。" + cont,
+            "history": chat_list}
+        url = self.__URL
+        req = json.dumps(content)
+
+        headers = {'content-type': 'application/json'}
+        r = requests.post(url, headers=headers, data=req)
+        res = json.loads(r.text)
+
+        return res['choices'][0]['text']
+
+    def question2(self, cont):
+        # Multi-turn variant against the chat/completions endpoint, which expects
+        # OpenAI-style "messages" (see the curl example above).
+        chat_list = []
+        # contentdb = content_db.new_instance()
+        # ... (same optional history reconstruction as in question())
+        chat_list.append({"role": "user", "content": "请简单回复我。" + cont})
+        content = {
+            "model": self.model,
+            "messages": chat_list}
+        url = self.__URL2
+        req = json.dumps(content)
+
+        headers = {'content-type': 'application/json'}
+        r = requests.post(url, headers=headers, data=req)
+        res = json.loads(r.text)
+
+        return res['choices'][0]['message']['content']
+
 
 if __name__ == "__main__":
-    req = question("你叫什么名字啊今年多大了")
-    print(req)
+    vllm = VllmGPT('192.168.1.3', '8101')
+    req = vllm.question("你叫什么名字啊今年多大了")
+    print(req)
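A minimal usage sketch, assuming the file is importable as llm.VllmGPT and a compatible server is reachable at the chosen host/port (illustrative, not part of the commit):

from llm.VllmGPT import VllmGPT

# Point at whatever host/port the vLLM-compatible server is running on.
bot = VllmGPT(host="127.0.0.1", port="8101")
print(bot.question("你好"))    # single-turn /v1/completions
print(bot.question2("你好"))   # multi-turn /v1/chat/completions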