From 18b5b8b204b8953b24e58b68673e90aefc8c7b50 Mon Sep 17 00:00:00 2001
From: Kedreamix
Date: Sat, 27 Jan 2024 19:38:13 +0800
Subject: [PATCH] Add LLM-based Q&A (e.g. ChatGPT)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 app.py         | 10 ++++++++--
 llm/ChatGPT.py | 16 ++++++++++++++++
 llm/Gemini.py  | 45 +++++++++++++++++++++++++++++++++++++++++++++
 llm/LLM.py     | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 llm/Qwen.py    | 37 +++++++++++++++++++++++++++++++++++++
 5 files changed, 154 insertions(+), 2 deletions(-)
 create mode 100644 llm/ChatGPT.py
 create mode 100644 llm/Gemini.py
 create mode 100644 llm/LLM.py
 create mode 100644 llm/Qwen.py

diff --git a/app.py b/app.py
index bc7a730..0ce8d8f 100644
--- a/app.py
+++ b/app.py
@@ -38,7 +38,13 @@ async def main(voicename: str, text: str, render):
             #file.write(chunk["data"])
         elif chunk["type"] == "WordBoundary":
             pass
-
+from llm.LLM import LLM
+def llm_response(message):
+    # llm = LLM().init_model('Gemini', model_path='gemini-pro', api_key='Your API Key', proxy_url=None)
+    llm = LLM().init_model('ChatGPT', model_path='gpt-3.5-turbo', api_key='Your API Key')
+    response = llm.chat(message)
+    print(response)
+    return response
 
 def txt_to_audio(text_):
     audio_list = []
@@ -85,7 +91,7 @@ def chat_socket(ws):
         if len(message)==0:
             return '输入信息为空'
         else:
-            res=llm(message)
+            res = llm_response(message)
             txt_to_audio(res)
 
 def render():
diff --git a/llm/ChatGPT.py b/llm/ChatGPT.py
new file mode 100644
index 0000000..eed1631
--- /dev/null
+++ b/llm/ChatGPT.py
@@ -0,0 +1,16 @@
+import openai
+
+class ChatGPT:
+    def __init__(self, model_path='gpt-3.5-turbo', api_key=None):
+        openai.api_key = api_key
+        self.model_path = model_path
+
+    def chat(self, message):
+        # Single-turn chat via the pre-1.0 openai SDK (openai.ChatCompletion was removed in openai>=1.0).
+        response = openai.ChatCompletion.create(
+            model=self.model_path,
+            messages=[
+                {"role": "user", "content": message}
+            ]
+        )
+        return response['choices'][0]['message']['content']
\ No newline at end of file
diff --git a/llm/Gemini.py b/llm/Gemini.py
new file mode 100644
index 0000000..aa4bacf
--- /dev/null
+++ b/llm/Gemini.py
@@ -0,0 +1,45 @@
+import os
+import google.generativeai as genai
+
+
+def configure_api(api_key, proxy_url=None):
+    # Only set the proxy variables when a proxy is given; os.environ values must be strings.
+    if proxy_url:
+        os.environ['https_proxy'] = os.environ['http_proxy'] = proxy_url
+    genai.configure(api_key=api_key)
+
+
+class Gemini:
+    def __init__(self, model_path='gemini-pro', api_key=None, proxy_url=None):
+        configure_api(api_key, proxy_url)
+        safety_settings = [
+            {
+                "category": "HARM_CATEGORY_HARASSMENT",
+                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+            },
+            {
+                "category": "HARM_CATEGORY_HATE_SPEECH",
+                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+            },
+            {
+                "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+            },
+            {
+                "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+                "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+            },
+        ]
+
+        self.model = genai.GenerativeModel(model_path, safety_settings=safety_settings)
+
+    def chat(self, message):
+        times = 0  # generate_content can fail transiently, so retry up to five times
+        while True:
+            try:
+                response = self.model.generate_content(message)
+                return response.text
+            except Exception:
+                times += 1
+                if times > 5:
+                    raise Exception('Failed to generate text.')
\ No newline at end of file
diff --git a/llm/LLM.py b/llm/LLM.py
new file mode 100644
index 0000000..50bd79c
--- /dev/null
+++ b/llm/LLM.py
@@ -0,0 +1,48 @@
+from llm.Qwen import Qwen
+from llm.Gemini import Gemini
+from llm.ChatGPT import ChatGPT
+
+
+def test_Qwen(question="如何应对压力?", mode='offline', model_path="Qwen/Qwen-1_8B-Chat"):
+    llm = Qwen(mode, model_path)
+    answer = llm.chat(question)
+    print(answer)
+
+def test_Gemini(question="如何应对压力?", model_path='gemini-pro', api_key=None, proxy_url=None):
+    llm = Gemini(model_path, api_key, proxy_url)
+    answer = llm.chat(question)
+    print(answer)
+
+class LLM:
+    def __init__(self, mode='offline'):
+        self.mode = mode
+
+    def init_model(self, model_name, model_path, api_key=None, proxy_url=None):
+        if model_name not in ['Qwen', 'Gemini', 'ChatGPT']:
+            raise ValueError("model_name must be 'ChatGPT', 'Qwen', or 'Gemini' (other models are not integrated yet)")
+
+        if model_name == 'Gemini':
+            llm = Gemini(model_path, api_key, proxy_url)
+        elif model_name == 'ChatGPT':
+            llm = ChatGPT(model_path, api_key=api_key)
+        elif model_name == 'Qwen':
+            llm = Qwen(self.mode, model_path)
+        return llm
+
+    # Quick smoke tests for the individual backends:
+    def test_Qwen(self, question="如何应对压力?", model_path="Qwen/Qwen-1_8B-Chat"):
+        llm = Qwen(self.mode, model_path)
+        answer = llm.chat(question)
+        print(answer)
+
+    def test_Gemini(self, question="如何应对压力?", model_path='gemini-pro', api_key=None, proxy_url=None):
+        llm = Gemini(model_path, api_key, proxy_url)
+        answer = llm.chat(question)
+        print(answer)
+
+if __name__ == '__main__':
+    llm = LLM()
+    llm.test_Gemini(api_key='Your API Key', proxy_url=None)
+    # llm = LLM().init_model('Gemini', model_path='gemini-pro', api_key='Your API Key', proxy_url='Your Proxy URL')
+    # response = llm.chat("如何应对压力?")
+    # print(response)
\ No newline at end of file
diff --git a/llm/Qwen.py b/llm/Qwen.py
new file mode 100644
index 0000000..6c79b31
--- /dev/null
+++ b/llm/Qwen.py
@@ -0,0 +1,37 @@
+import os
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
+
+class Qwen:
+    def __init__(self, mode='offline', model_path="Qwen/Qwen-1_8B-Chat") -> None:
+        '''An API mode (similar to Linly-api) is not implemented yet; feel free to add one.'''
+        # mode is reserved for that future API mode; only offline inference is implemented.
+        self.mode = mode
+        self.model, self.tokenizer = self.init_model(model_path)
+
+    def init_model(self, path="Qwen/Qwen-1_8B-Chat"):
+        # trust_remote_code is required to load Qwen's custom modeling and chat code.
+        model = AutoModelForCausalLM.from_pretrained(path,
+                                                     device_map="auto",
+                                                     trust_remote_code=True).eval()
+        tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
+
+        return model, tokenizer
+
+    def chat(self, question):
+        # Qwen's chat() applies the chat template itself, so the raw question is passed in.
+        try:
+            response, history = self.model.chat(self.tokenizer, question, history=None)
+            return response
+        except Exception:
+            return "对不起，你的请求出错了，请再次尝试。\nSorry, your request has encountered an error. Please try again.\n"
+
+
+def test():
+    llm = Qwen(model_path="Qwen/Qwen-1_8B-Chat")
+    answer = llm.chat("如何应对压力?")
+    print(answer)
+
+if __name__ == '__main__':
+    test()
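
Usage note (a minimal sketch, not part of the patch itself): LLM.init_model returns
the concrete backend object, and every backend exposes the same chat() entry point,
so app.py's llm_response reduces to the following. The API key here is a placeholder.

    from llm.LLM import LLM

    # 'Qwen' runs locally via transformers; 'ChatGPT' and 'Gemini' need an API key.
    llm = LLM().init_model('ChatGPT', model_path='gpt-3.5-turbo', api_key='Your API Key')
    reply = llm.chat("如何应对压力?")
    print(reply)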