Implemented communication with the large language model
parent ff6487b288
commit 90210df5da
@@ -1,28 +0,0 @@
# -*- coding: utf-8 -*-
# Generate disclaimer statements by querying the chat API through the utils helpers.

import utils

# user_input_path_pre = 'prompts/新闻问题_user_pre.txt'
# user_input_path_suf = 'prompts/新闻问题_user_suf.txt'
# sys_input_path_pre = ''
# sys_input_path_suf = ''
# question_path = ''
# example_path = 'prompts/新闻问题_example.txt'
output_path = 'prompts/声明.txt'

USER_PRE = ''
USER_SUF = ''
SYS_PRE = ''
SYS_SUF = ''

SAMPLE_NUM = 3

if __name__ == '__main__':
    # User prompt (in Chinese): asks the model to generate 10 new disclaimer
    # statements modeled on the example statement embedded in the prompt.
    user_input = [
        '请仿照下面的声明,重新生成10条声明:\n在回答您提出的问题之前,我需要强调,我作为一个军事人工智能助手,没有自主思维、情感或观点,无法产生真实的体验和判断。我所提供的信息和观点仅基于已有的历史数据和常识,旨在为您提供一种可能的解释,但并不代表任何实际个体或团体的观点或决策。'
        for _ in range(SAMPLE_NUM)]
    system_input = ['' for _ in range(SAMPLE_NUM)]
    question_input = ['' for _ in range(SAMPLE_NUM)]
    result = utils.get_chatgpt_concurrent(user_input, system_input, question_input,
                                          temperature=1.5, top_p=0.6,
                                          frequency_penalty=1.3, presence_penalty=1.3,
                                          max_tokens=8192)
    with open(output_path, 'w', encoding='utf-8') as f:
        for line in result:
            f.write(line['answer'] + '\n')
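Each element that utils.get_chatgpt_concurrent returns (see the utils module in the last hunk below) is a dict holding 'answer' plus either 'question' or 'system'/'user'; the script above keeps only the answers. If the prompts ever need to be stored next to the generated text, a minimal sketch could look like the following; the save_jsonl helper and the .jsonl path are illustrative, not part of the repository:

import json

def save_jsonl(records, path):
    # Write each result dict (prompt fields plus 'answer') as one JSON object per line.
    with open(path, 'w', encoding='utf-8') as f:
        for item in records:
            f.write(json.dumps(item, ensure_ascii=False) + '\n')

# e.g. save_jsonl(result, 'prompts/声明.jsonl')  # hypothetical output file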
@@ -1,6 +0,0 @@
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
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
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
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
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
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
@@ -1,83 +0,0 @@
import tiktoken
import random
import requests
import time
import json
import concurrent.futures
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# One API key per line; drop empty lines left by a trailing newline.
with open('openai_api_key.txt', 'r', encoding='utf-8') as key_file:
    OPENAI_API_KEY = [key.strip() for key in key_file if key.strip()]


def get_tokens_num(text, model):
    """Return the number of tokens `text` occupies under `model`'s encoding."""
    encoding = tiktoken.encoding_for_model(model)
    tokens_num = len(encoding.encode(text))
    return tokens_num


def get_chatgpt(user, system='', question='', model='gpt-3.5-turbo-16k-0613',
                temperature=1.0, max_tokens=2048, top_p=0.8,
                frequency_penalty=0.0, presence_penalty=0.0):
    """Send one chat-completion request, retrying up to 20 times with randomly chosen keys."""
    try_count = 0
    while try_count < 20:
        try_count += 1
        key_choice = random.choice(OPENAI_API_KEY)
        url = 'https://api.openai.com/v1/chat/completions'
        headers = {
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ' + key_choice,
        }
        try:
            prompt = {
                'model': model,
                'messages': [
                    {'role': 'system', 'content': system},
                    {'role': 'user', 'content': user}
                ],
                'temperature': temperature,
                'max_tokens': max_tokens,
                'top_p': top_p,
                'frequency_penalty': frequency_penalty,
                'presence_penalty': presence_penalty
            }
            resp = requests.post(url, json=prompt, headers=headers, verify=False, timeout=60)
            answer = json.loads(resp.text)
            if 'choices' not in answer:
                # Error payload (e.g. rate limit or invalid key): wait and retry with another key.
                time.sleep(1)
                continue
            result = answer['choices'][0]['message']['content']
            if result == '':
                time.sleep(1)
                continue
            if question:
                return {'question': question, 'answer': result}
            return {'system': system, 'user': user, 'answer': result}
        except Exception as e:
            print(key_choice + ': query request failed\n' + str(e))
            print('retrying...')
            time.sleep(1)
    # All retries exhausted; callers treat a falsy return value as a failed query.
    return None


def get_chatgpt_concurrent(user_input, system_input, question_input,
                           model='gpt-3.5-turbo-16k-0613', temperature=1.0,
                           max_tokens=2048, top_p=0.8,
                           frequency_penalty=0.0, presence_penalty=0.0):
    """Run get_chatgpt over the zipped inputs in a thread pool and collect the successful results."""
    work_count = 0
    result = []
    print('Query size is ' + str(len(user_input)))
    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        futures = [executor.submit(get_chatgpt, user, system, question, model,
                                   temperature, max_tokens, top_p,
                                   frequency_penalty, presence_penalty)
                   for user, system, question in zip(user_input, system_input, question_input)]
        for future in concurrent.futures.as_completed(futures):
            if future.result():
                result.append(future.result())
            work_count += 1
            print(str(work_count) + '/' + str(len(user_input)) + ' finished')
    return result
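For context, a minimal usage sketch of these helpers; it assumes openai_api_key.txt sits next to utils.py, and the prompt strings are illustrative rather than taken from the repository:

import utils

prompts = ['Rewrite the following sentence in a formal tone.'] * 2
systems = [''] * 2
questions = [''] * 2

# Token accounting for the first prompt under the default model's encoding.
print(utils.get_tokens_num(prompts[0], 'gpt-3.5-turbo-16k-0613'))

# Fan the queries out over the thread pool; queries that fail after all retries
# are simply absent from the returned list.
results = utils.get_chatgpt_concurrent(prompts, systems, questions,
                                       temperature=0.7, max_tokens=256)
for item in results:
    # Each item carries 'answer' plus either 'question' or 'system'/'user'.
    print(item['answer'])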