From d5c8b240ccade76f17ce1511e6116b4850542de6 Mon Sep 17 00:00:00 2001
From: "yanyuxiyangzk@126.com"
Date: Wed, 3 Apr 2024 22:02:23 +0800
Subject: [PATCH] vllm+chatglm3-6b

---
 app.py     | 3 ++-
 llm/LLM.py | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/app.py b/app.py
index 843dfd8..00553b1 100644
--- a/app.py
+++ b/app.py
@@ -125,7 +125,8 @@ def echo_socket(ws):
 def llm_response(message):
     from llm.LLM import LLM
     # llm = LLM().init_model('Gemini', model_path= 'gemini-pro',api_key='Your API Key', proxy_url=None)
-    llm = LLM().init_model('ChatGPT', model_path= 'gpt-3.5-turbo',api_key='Your API Key')
+    # llm = LLM().init_model('ChatGPT', model_path= 'gpt-3.5-turbo',api_key='Your API Key')
+    llm = LLM().init_model('VllmGPT', model_path= 'THUDM/chatglm3-6b')
     response = llm.chat(message)
     print(response)
     return response
diff --git a/llm/LLM.py b/llm/LLM.py
index 8695d0f..3157e8e 100644
--- a/llm/LLM.py
+++ b/llm/LLM.py
@@ -47,6 +47,6 @@ if __name__ == '__main__':
     # llm.test_Gemini(api_key='你的API Key', proxy_url=None)
     # llm = LLM().init_model('Gemini', model_path= 'gemini-pro',api_key='AIzaSyBWAWfT8zsyAZcRIXLS5Vzlw8KKCN9qsAg', proxy_url='http://172.31.71.58:7890')
     # response = llm.chat("如何应对压力?")
-    llm = LLM().init_model('VllmGPT', model_path= 'THUDM/chatglm3-6b',api_key='', proxy_url='http://172.31.71.58:7890')
+    llm = LLM().init_model('VllmGPT', model_path= 'THUDM/chatglm3-6b')
     response = llm.chat("如何应对压力?")
     # print(response)
\ No newline at end of file