diff --git a/llm/LLM.py b/llm/LLM.py
index 50bd79c..8695d0f 100644
--- a/llm/LLM.py
+++ b/llm/LLM.py
@@ -1,7 +1,7 @@
 from llm.Qwen import Qwen
 from llm.Gemini import Gemini
 from llm.ChatGPT import ChatGPT
-
+from llm.VllmGPT import VllmGPT
 
 def test_Qwen(question = "如何应对压力?", mode='offline', model_path="Qwen/Qwen-1_8B-Chat"):
     llm = Qwen(mode, model_path)
@@ -18,8 +18,8 @@ class LLM:
         self.mode = mode
 
     def init_model(self, model_name, model_path, api_key=None, proxy_url=None):
-        if model_name not in ['Qwen', 'Gemini', 'ChatGPT']:
-            raise ValueError("model_name must be 'ChatGPT', 'Qwen', or 'Gemini'(其他模型还未集成)")
+        if model_name not in ['Qwen', 'Gemini', 'ChatGPT', 'VllmGPT']:
+            raise ValueError("model_name must be 'ChatGPT', 'VllmGPT', 'Qwen', or 'Gemini'(其他模型还未集成)")
 
         if model_name == 'Gemini':
             llm = Gemini(model_path, api_key, proxy_url)
@@ -27,6 +27,8 @@ class LLM:
             llm = ChatGPT(model_path, api_key=api_key)
         elif model_name == 'Qwen':
             llm = Qwen(self.mode, model_path)
+        elif model_name == 'VllmGPT':
+            llm = VllmGPT()
 
         return llm
 
@@ -41,8 +43,10 @@ class LLM:
         print(answer)
 
 if __name__ == '__main__':
-    llm = LLM()
-    llm.test_Gemini(api_key='你的API Key', proxy_url=None)
+    # llm = LLM()
+    # llm.test_Gemini(api_key='你的API Key', proxy_url=None)
     # llm = LLM().init_model('Gemini', model_path= 'gemini-pro',api_key='AIzaSyBWAWfT8zsyAZcRIXLS5Vzlw8KKCN9qsAg', proxy_url='http://172.31.71.58:7890')
     # response = llm.chat("如何应对压力?")
+    llm = LLM().init_model('VllmGPT', model_path= 'THUDM/chatglm3-6b',api_key='', proxy_url='http://172.31.71.58:7890')
+    response = llm.chat("如何应对压力?")
     # print(response)
\ No newline at end of file
diff --git a/llm/VllmGPT.py b/llm/VllmGPT.py
index 6e70096..a8db6aa 100644
--- a/llm/VllmGPT.py
+++ b/llm/VllmGPT.py
@@ -4,8 +4,8 @@ import requests
 
 class VllmGPT:
 
-    def __init__(self, host="127.0.0.1",
-                 port="8000",
+    def __init__(self, host="192.168.1.3",
+                 port="8101",
                  model="THUDM/chatglm3-6b",
                  max_tokens="1024"):
         self.host = host
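Note on the new code path: init_model now routes 'VllmGPT' to a client that talks to a running vLLM server over HTTP, and the constructor defaults in llm/VllmGPT.py move from 127.0.0.1:8000 to a LAN address, 192.168.1.3:8101. The diff does not include VllmGPT.chat, so the sketch below is only an illustration of how such a client could issue the request, assuming the server exposes vLLM's OpenAI-compatible /v1/completions route; the class name, endpoint path, payload fields, and response shape are assumptions for illustration, not the repository's actual implementation.

import requests

class VllmGPTSketch:
    # Hypothetical stand-in for llm/VllmGPT.py's client (this name is ours, not the repo's).
    # Assumes a vLLM server started with the OpenAI-compatible entrypoint, e.g.:
    #   python -m vllm.entrypoints.openai.api_server --model THUDM/chatglm3-6b --port 8101
    def __init__(self, host="192.168.1.3", port="8101",
                 model="THUDM/chatglm3-6b", max_tokens="1024"):
        self.host = host
        self.port = port
        self.model = model
        self.max_tokens = int(max_tokens)  # the diff stores max_tokens as a string

    def chat(self, question):
        # Endpoint and payload follow the OpenAI-compatible completions API;
        # whether the real VllmGPT uses this exact route is an assumption.
        url = f"http://{self.host}:{self.port}/v1/completions"
        resp = requests.post(
            url,
            json={"model": self.model,
                  "prompt": question,
                  "max_tokens": self.max_tokens},
            timeout=60,
        )
        resp.raise_for_status()
        # OpenAI-compatible servers return the completion at choices[0].text.
        return resp.json()["choices"][0]["text"]

if __name__ == "__main__":
    # Mirrors the new __main__ block: "如何应对压力?" ("How do I cope with stress?")
    print(VllmGPTSketch().chat("如何应对压力?"))

One design observation grounded in the diff itself: the new factory branch calls VllmGPT() with no arguments, so the model_path and proxy_url passed in __main__ are ignored and the constructor defaults (including the hard-coded 192.168.1.3 host) always apply.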