Add LLM Q&A (e.g. ChatGPT)

Kedreamix committed 2024-01-27 19:38:13 +08:00
parent 56338be013, commit 18b5b8b204
5 changed files with 154 additions and 2 deletions

app.py · 10 changed lines

@@ -38,7 +38,13 @@ async def main(voicename: str, text: str, render):
            # file.write(chunk["data"])
        elif chunk["type"] == "WordBoundary":
            pass
from llm.LLM import *
def llm_response(message):
    # llm = LLM().init_model('Gemini', model_path='gemini-pro', api_key='Your API Key', proxy_url=None)
    llm = LLM().init_model('ChatGPT', model_path='gpt-3.5-turbo', api_key='Your API Key')
    response = llm.chat(message)
    print(response)
    return response
def txt_to_audio(text_):
    audio_list = []
@@ -85,7 +91,7 @@ def chat_socket(ws):
        if len(message)==0:
            return '输入信息为空'
        else:
            res=llm(message)
            res=llm_response(message)
            txt_to_audio(res)
def render():

llm/ChatGPT.py · 16 lines (new file)

@@ -0,0 +1,16 @@
import openai

class ChatGPT():
    def __init__(self, model_path='gpt-3.5-turbo', api_key=None):
        openai.api_key = api_key
        self.model_path = model_path

    def chat(self, message):
        response = openai.ChatCompletion.create(
            model=self.model_path,
            messages=[
                {"role": "user", "content": message}
            ]
        )
        return response['choices'][0]['message']['content']
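
A minimal usage sketch for this wrapper (the key below is a placeholder). Note that openai.ChatCompletion.create is the pre-1.0 interface of the openai package, so this file assumes openai<1.0 is installed:

from llm.ChatGPT import ChatGPT

llm = ChatGPT(model_path='gpt-3.5-turbo', api_key='Your API Key')  # placeholder key
print(llm.chat("如何应对压力?"))  # prints the reply text returned by the API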

llm/Gemini.py · 45 lines (new file)

@@ -0,0 +1,45 @@
import os
import google.generativeai as genai

def configure_api(api_key, proxy_url=None):
    # Only set the proxy environment variables when a proxy URL is given;
    # assigning None to os.environ would raise a TypeError.
    if proxy_url:
        os.environ['https_proxy'] = proxy_url
        os.environ['http_proxy'] = proxy_url
    genai.configure(api_key=api_key)

class Gemini:
    def __init__(self, model_path='gemini-pro', api_key=None, proxy=None):
        configure_api(api_key, proxy)
        safety_settings = [
            {
                "category": "HARM_CATEGORY_HARASSMENT",
                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
            },
            {
                "category": "HARM_CATEGORY_HATE_SPEECH",
                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
            },
            {
                "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
            },
            {
                "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
            },
        ]
        self.model = genai.GenerativeModel(model_path, safety_settings=safety_settings)

    def chat(self, message):
        # Retry up to 5 times before giving up.
        times = 0
        while True:
            try:
                response = self.model.generate_content(message)
                return response.text
            except Exception:
                times += 1
                if times > 5:
                    raise Exception('Failed to generate text.')
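
A minimal usage sketch, assuming the google-generativeai package is installed and a valid key is supplied (the key below is a placeholder; the proxy argument is only needed when the Google endpoints are not directly reachable):

from llm.Gemini import Gemini

llm = Gemini(model_path='gemini-pro', api_key='Your API Key', proxy=None)
print(llm.chat("如何应对压力?"))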

llm/LLM.py · 48 lines (new file)

@@ -0,0 +1,48 @@
from llm.Qwen import Qwen
from llm.Gemini import Gemini
from llm.ChatGPT import ChatGPT

def test_Qwen(question="如何应对压力?", mode='offline', model_path="Qwen/Qwen-1_8B-Chat"):
    llm = Qwen(mode, model_path)
    answer = llm.chat(question)
    print(answer)

def test_Gemini(question="如何应对压力?", model_path='gemini-pro', api_key=None, proxy_url=None):
    llm = Gemini(model_path, api_key, proxy_url)
    answer = llm.chat(question)
    print(answer)

class LLM:
    def __init__(self, mode='offline'):
        self.mode = mode

    def init_model(self, model_name, model_path, api_key=None, proxy_url=None):
        if model_name not in ['Qwen', 'Gemini', 'ChatGPT']:
            raise ValueError("model_name must be 'ChatGPT', 'Qwen', or 'Gemini' (other models are not integrated yet)")
        if model_name == 'Gemini':
            llm = Gemini(model_path, api_key, proxy_url)
        elif model_name == 'ChatGPT':
            llm = ChatGPT(model_path, api_key=api_key)
        elif model_name == 'Qwen':
            llm = Qwen(self.mode, model_path)
        return llm

    def test_Qwen(self, question="如何应对压力?", model_path="Qwen/Qwen-1_8B-Chat"):
        llm = Qwen(self.mode, model_path)
        answer = llm.chat(question)
        print(answer)

    def test_Gemini(self, question="如何应对压力?", model_path='gemini-pro', api_key=None, proxy_url=None):
        llm = Gemini(model_path, api_key, proxy_url)
        answer = llm.chat(question)
        print(answer)

if __name__ == '__main__':
    llm = LLM()
    llm.test_Gemini(api_key='Your API Key', proxy_url=None)
    # llm = LLM().init_model('Gemini', model_path='gemini-pro', api_key='Your API Key', proxy_url='http://172.31.71.58:7890')
    # response = llm.chat("如何应对压力?")
    # print(response)
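
LLM is a small factory: init_model returns a Qwen, Gemini, or ChatGPT instance by name, and all three expose the same chat(message) method, which is what llm_response in app.py relies on. A sketch of switching backends (key strings below are placeholders):

from llm.LLM import LLM

llm = LLM().init_model('ChatGPT', model_path='gpt-3.5-turbo', api_key='Your API Key')
# llm = LLM().init_model('Gemini', model_path='gemini-pro', api_key='Your API Key', proxy_url=None)
# llm = LLM(mode='offline').init_model('Qwen', model_path='Qwen/Qwen-1_8B-Chat')
print(llm.chat("如何应对压力?"))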

llm/Qwen.py · 37 lines (new file)

@@ -0,0 +1,37 @@
import os
import torch
import requests
from transformers import AutoModelForCausalLM, AutoTokenizer

os.environ['CUDA_LAUNCH_BLOCKING'] = '1'

class Qwen:
    def __init__(self, mode='offline', model_path="Qwen/Qwen-1_8B-Chat") -> None:
        '''API mode is not implemented for now; it would be similar to Linly-api, feel free to implement it.'''
        self.mode = mode
        self.model, self.tokenizer = self.init_model(model_path)
        self.data = {}
        self.prompt = ''  # optional system prompt prepended to each question

    def init_model(self, path="Qwen/Qwen-1_8B-Chat"):
        model = AutoModelForCausalLM.from_pretrained(path,
                                                     device_map="auto",
                                                     trust_remote_code=True).eval()
        tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
        return model, tokenizer

    def chat(self, question):
        self.data["question"] = f"{self.prompt} ### Instruction:{question} ### Response:"
        try:
            response, history = self.model.chat(self.tokenizer, self.data["question"], history=None)
            print(history)
            return response
        except Exception:
            return "对不起,你的请求出错了,请再次尝试。\nSorry, your request has encountered an error. Please try again.\n"

def test():
    llm = Qwen(model_path="Qwen/Qwen-1_8B-Chat")
    answer = llm.chat("如何应对压力?")
    print(answer)

if __name__ == '__main__':
    test()