add qwen openapi
parent: 54fcbb8cc7
commit: 3674875095
@@ -26,7 +26,7 @@ class LLM:
         elif model_name == 'ChatGPT':
             llm = ChatGPT(model_path, api_key=api_key)
         elif model_name == 'Qwen':
-            llm = Qwen(model_path)
+            llm = Qwen(model_path=model_path, api_key=api_key, api_base=proxy_url)
         elif model_name == 'VllmGPT':
             llm = VllmGPT()
         return llm
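For context, a minimal sketch of how the updated branch might be called; the factory method's name (`init_model` here) and its parameters are assumptions, since the full signature is not shown in this hunk:

    # Hypothetical call into the factory (method name and signature assumed).
    from llm.LLM import LLM

    # Local inference: api_key/proxy_url left unset, so Qwen receives
    # api_key=None and api_base=None and loads weights from model_path.
    llm = LLM().init_model('Qwen', model_path="Qwen/Qwen-1_8B-Chat")

    # API mode: an api_key plus an OpenAI-compatible endpoint as proxy_url.
    llm = LLM().init_model('Qwen', model_path="Qwen/Qwen-1_8B-Chat",
                           api_key="sk-xxx", proxy_url="http://localhost:8000/v1")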
llm/Qwen.py (41 lines changed)
@@ -1,12 +1,29 @@
 import os
 import torch
 import requests
-from transformers import AutoModelForCausalLM, AutoTokenizer
+import openai
 
+'''
+If `huggingface` cannot be reached, `modelscope` can be used instead:
+`pip install modelscope`
+'''
+from modelscope import AutoModelForCausalLM, AutoTokenizer
+#from transformers import AutoModelForCausalLM, AutoTokenizer
+
 os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
 
+
 class Qwen:
-    def __init__(self, model_path="Qwen/Qwen-1_8B-Chat") -> None:
-        '''No API version for now; it would be similar to Linly-api, implement it if interested'''
+    def __init__(self, model_path="Qwen/Qwen-1_8B-Chat", api_base=None, api_key=None) -> None:
+        # Local inference by default
+        self.local = True
+
+        # Use the OpenAI-compatible API when api_base and api_key are both set
+        if api_key is not None and api_base is not None:
+            openai.api_base = api_base
+            openai.api_key = api_key
+            self.local = False
+            return
+
         self.model, self.tokenizer = self.init_model(model_path)
         self.data = {}
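The constructor now supports two modes. A short sketch (the endpoint URL and key below are placeholders):

    # Local mode: weights are loaded through init_model().
    qwen_local = Qwen(model_path="Qwen/Qwen-1_8B-Chat")

    # API mode: nothing is loaded locally; chat() goes through the
    # OpenAI-compatible endpoint configured here.
    qwen_api = Qwen(api_base="http://localhost:8000/v1", api_key="none")

Note that the guard requires both values: if only one of api_base/api_key is given, the class silently falls back to local inference.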
@@ -19,7 +36,20 @@ class Qwen:
         return model, tokenizer
 
     def chat(self, question):
-
+        # Prefer the qwen openapi route when it is configured
+        if not self.local:
+            # Non-streaming request
+            response = openai.ChatCompletion.create(
+                model="Qwen",
+                messages=[
+                    {"role": "user", "content": question}
+                ],
+                stream=False,
+                stop=[]
+            )
+            return response.choices[0].message.content
+
+        # Local inference by default
         self.data["question"] = f"{question} ### Instruction:{question} ### Response:"
         try:
             response, history = self.model.chat(self.tokenizer, self.data["question"], history=None)
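`openai.ChatCompletion.create` and the module-level `openai.api_base`/`openai.api_key` belong to the pre-1.0 `openai` SDK, so this code requires `openai<1.0`. For reference only (not part of this commit), the equivalent non-streaming call under `openai>=1.0` would look roughly like this, with a placeholder endpoint and key:

    from openai import OpenAI

    # Client-scoped configuration replaces the old module-level globals.
    client = OpenAI(base_url="http://localhost:8000/v1", api_key="none")
    response = client.chat.completions.create(
        model="Qwen",
        messages=[{"role": "user", "content": "How should I cope with stress?"}],
        stream=False,
    )
    print(response.choices[0].message.content)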
@@ -28,11 +58,12 @@ class Qwen:
         except:
             return "对不起,你的请求出错了,请再次尝试。\nSorry, your request has encountered an error. Please try again.\n"
 
 
 def test():
     llm = Qwen(model_path="Qwen/Qwen-1_8B-Chat")
     answer = llm.chat(question="如何应对压力?")
     print(answer)
 
+
 if __name__ == '__main__':
     test()
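`test()` only exercises the local path. A possible API-mode check, assuming an OpenAI-compatible Qwen endpoint (for example one served by vLLM, or by the `openai_api.py` demo in the Qwen repository) is running at the placeholder URL:

    def test_api():
        # Placeholder endpoint and key; point these at a real server.
        llm = Qwen(api_base="http://localhost:8000/v1", api_key="none")
        answer = llm.chat(question="How should I cope with stress?")
        print(answer)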