vllm+chatglm3-6b
commit d5c8b240cc (parent fe963ed543)
app.py
@@ -125,7 +125,8 @@ def echo_socket(ws):
 def llm_response(message):
     from llm.LLM import LLM
     # llm = LLM().init_model('Gemini', model_path= 'gemini-pro',api_key='Your API Key', proxy_url=None)
-    llm = LLM().init_model('ChatGPT', model_path= 'gpt-3.5-turbo',api_key='Your API Key')
+    # llm = LLM().init_model('ChatGPT', model_path= 'gpt-3.5-turbo',api_key='Your API Key')
+    llm = LLM().init_model('VllmGPT', model_path= 'THUDM/chatglm3-6b')
     response = llm.chat(message)
     print(response)
     return response
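The hunk above swaps the hard-coded ChatGPT call for a local ChatGLM3-6B model served by vLLM; since the model now runs locally, no API key is passed. The VllmGPT class itself is not part of this diff, so the following is only a minimal sketch of what such a wrapper might look like, assuming a vLLM server exposing its OpenAI-compatible API on http://127.0.0.1:8000; the class and method names follow the diff, while the endpoint, port, and parameters are assumptions:

# Minimal sketch only -- not the repository's actual VllmGPT implementation.
# Assumes a vLLM server was started first, e.g.:
#   python -m vllm.entrypoints.openai.api_server --model THUDM/chatglm3-6b --trust-remote-code
import requests

class VllmGPT:
    def __init__(self, model_path='THUDM/chatglm3-6b',
                 base_url='http://127.0.0.1:8000'):  # host/port are assumptions
        self.model = model_path
        self.url = base_url + '/v1/chat/completions'

    def chat(self, message):
        # Single-turn chat request in the OpenAI chat-completions format vLLM serves.
        payload = {
            'model': self.model,
            'messages': [{'role': 'user', 'content': message}],
            'max_tokens': 256,
        }
        resp = requests.post(self.url, json=payload, timeout=60)
        resp.raise_for_status()
        return resp.json()['choices'][0]['message']['content']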
@@ -47,6 +47,6 @@ if __name__ == '__main__':
     # llm.test_Gemini(api_key='Your API Key', proxy_url=None)
     # llm = LLM().init_model('Gemini', model_path= 'gemini-pro',api_key='AIzaSyBWAWfT8zsyAZcRIXLS5Vzlw8KKCN9qsAg', proxy_url='http://172.31.71.58:7890')
     # response = llm.chat("How should I cope with stress?")
-    llm = LLM().init_model('VllmGPT', model_path= 'THUDM/chatglm3-6b',api_key='', proxy_url='http://172.31.71.58:7890')
+    llm = LLM().init_model('VllmGPT', model_path= 'THUDM/chatglm3-6b')
     response = llm.chat("How should I cope with stress?")
     # print(response)
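This second hunk drops the now-unused api_key and proxy_url arguments from the module's self-test: a locally served model needs neither. Under the same assumptions as the sketch above, the test reduces to:

# Smoke test mirroring the __main__ block, using the hypothetical VllmGPT sketch above.
llm = VllmGPT(model_path='THUDM/chatglm3-6b')
print(llm.chat('How should I cope with stress?'))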