Added custom request header support;

Fixed related API call handling.
lintsinghua 2025-10-26 15:32:39 +08:00
parent ecfee09ad4
commit ea97d67165
16 changed files with 196 additions and 58 deletions

View File

@@ -189,11 +189,18 @@ VITE_LLM_GAP_MS=500 # Request interval (ms)
```env
VITE_LLM_TIMEOUT=300000 # Increase timeout
VITE_LLM_BASE_URL=https://your-proxy.com # Use a proxy or relay service
VITE_LLM_BASE_URL=https://your-proxy.com/v1 # Use a proxy or relay service
VITE_LLM_CONCURRENCY=1 # Reduce concurrency
VITE_LLM_GAP_MS=1000 # Increase request interval
```
**Custom Headers Example** (for special relay services):
```env
# JSON-format string
VITE_LLM_CUSTOM_HEADERS='{"X-API-Version":"v1","X-Custom-Auth":"token123"}'
```
### FAQ
<details>
@@ -581,19 +588,27 @@ pnpm lint
#### Core LLM Configuration
| Variable | Required | Default | Description |
|----------|----------|---------|-------------|
| `VITE_LLM_PROVIDER` | ✅ | `gemini` | LLM provider: `gemini`\|`openai`\|`claude`\|`qwen`\|`deepseek`\|`zhipu`\|`moonshot`\|`baidu`\|`minimax`\|`doubao` |
| `VITE_LLM_PROVIDER` | ✅ | `gemini` | LLM provider: `gemini`\|`openai`\|`claude`\|`qwen`\|`deepseek`\|`zhipu`\|`moonshot`\|`baidu`\|`minimax`\|`doubao`\|`ollama` |
| `VITE_LLM_API_KEY` | ✅ | - | Universal API Key (higher priority than platform-specific config) |
| `VITE_LLM_MODEL` | ❌ | Auto | Model name (uses platform default if not specified) |
| `VITE_LLM_BASE_URL` | ❌ | - | Custom API endpoint (for proxy, relay, or private deployment) |
| `VITE_LLM_BASE_URL` | ❌ | - | Custom API endpoint (**supports relay services for all platforms**, proxy, or private deployment) |
| `VITE_LLM_TIMEOUT` | ❌ | `150000` | Request timeout (milliseconds) |
| `VITE_LLM_TEMPERATURE` | ❌ | `0.2` | Temperature parameter (0.0-2.0), controls output randomness |
| `VITE_LLM_MAX_TOKENS` | ❌ | `4096` | Maximum output tokens |
| `VITE_LLM_CUSTOM_HEADERS` | ❌ | - | Custom HTTP headers (JSON string format), for special relay services or self-hosted instances |
> 💡 **API Format Support**: XCodeReviewer supports three mainstream API formats:
> - **OpenAI-Compatible Format** (Most Common): Works with most relay services and OpenRouter
> - **Gemini Format**: Google Gemini official and compatible services
> - **Claude Format**: Anthropic Claude official and compatible services
>
> Simply select the corresponding LLM provider, then enter the relay service address and key. The custom headers feature covers any additional requirements of special relay services.
#### Platform-Specific API Key Configuration (Optional)
| Variable | Description | Special Requirements |
|----------|-------------|---------------------|
| `VITE_GEMINI_API_KEY` | Google Gemini API Key | - |
| `VITE_GEMINI_MODEL` | Gemini model (default: gemini-2.5-flash) | - |
| `VITE_GEMINI_MODEL` | Gemini model (default: gemini-1.5-flash) | - |
| `VITE_OPENAI_API_KEY` | OpenAI API Key | - |
| `VITE_OPENAI_MODEL` | OpenAI model (default: gpt-4o-mini) | - |
| `VITE_OPENAI_BASE_URL` | OpenAI custom endpoint | For relay services |

View File

@@ -189,11 +189,18 @@ For timeout or connection issues, adjust these parameters:
```env
VITE_LLM_TIMEOUT=300000 # Increase timeout
VITE_LLM_BASE_URL=https://your-proxy.com # Use proxy or relay service
VITE_LLM_BASE_URL=https://your-proxy.com/v1 # Use proxy or relay service
VITE_LLM_CONCURRENCY=1 # Reduce concurrency
VITE_LLM_GAP_MS=1000 # Increase request interval
```
**Custom Headers Example** (for special relay services):
```env
# JSON format string
VITE_LLM_CUSTOM_HEADERS='{"X-API-Version":"v1","X-Custom-Auth":"token123"}'
```
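At runtime this value arrives as a plain string and must be parsed before it can be attached to requests. A minimal sketch of that flow, assuming Vite's `import.meta.env`; the helper name `parseCustomHeaders` is illustrative, not part of the codebase:

```ts
// Parse the JSON string from the environment; fall back to no extra headers
// when the value is missing or malformed.
function parseCustomHeaders(raw: string | undefined): Record<string, string> {
  if (!raw) return {};
  try {
    return JSON.parse(raw) as Record<string, string>;
  } catch {
    console.warn('VITE_LLM_CUSTOM_HEADERS is not valid JSON; ignoring it');
    return {};
  }
}

const apiKey = import.meta.env.VITE_LLM_API_KEY ?? '';
const customHeaders = parseCustomHeaders(import.meta.env.VITE_LLM_CUSTOM_HEADERS);

// Custom headers are merged after the defaults, so they can also override them.
const headers = {
  'Content-Type': 'application/json',
  Authorization: `Bearer ${apiKey}`,
  ...customHeaders,
};
```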
### FAQ
<details>
@@ -582,19 +589,27 @@ pnpm lint
#### Core LLM Configuration
| Variable | Required | Default | Description |
|----------|----------|---------|-------------|
| `VITE_LLM_PROVIDER` | ✅ | `gemini` | LLM provider: `gemini`\|`openai`\|`claude`\|`qwen`\|`deepseek`\|`zhipu`\|`moonshot`\|`baidu`\|`minimax`\|`doubao` |
| `VITE_LLM_PROVIDER` | ✅ | `gemini` | LLM provider: `gemini`\|`openai`\|`claude`\|`qwen`\|`deepseek`\|`zhipu`\|`moonshot`\|`baidu`\|`minimax`\|`doubao`\|`ollama` |
| `VITE_LLM_API_KEY` | ✅ | - | Universal API Key (higher priority than platform-specific config) |
| `VITE_LLM_MODEL` | ❌ | Auto | Model name (uses platform default if not specified) |
| `VITE_LLM_BASE_URL` | ❌ | - | Custom API endpoint (for proxy, relay, or private deployment) |
| `VITE_LLM_BASE_URL` | ❌ | - | Custom API endpoint (**supports relay services for all platforms**, proxy, or private deployment) |
| `VITE_LLM_TIMEOUT` | ❌ | `150000` | Request timeout (milliseconds) |
| `VITE_LLM_TEMPERATURE` | ❌ | `0.2` | Temperature parameter (0.0-2.0), controls output randomness |
| `VITE_LLM_MAX_TOKENS` | ❌ | `4096` | Maximum output tokens |
| `VITE_LLM_CUSTOM_HEADERS` | ❌ | - | Custom HTTP headers (JSON string format), for special relay services or self-hosted instances |
> 💡 **API Format Support**: XCodeReviewer supports 3 mainstream API formats:
> - **OpenAI-Compatible Format** (Most Common): Works with most relay services and OpenRouter
> - **Gemini Format**: Google Gemini official and compatible services
> - **Claude Format**: Anthropic Claude official and compatible services
>
> Simply select the corresponding LLM provider, then enter the relay service address and key. The custom headers feature covers any additional requirements of special relay services.
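To make the three formats concrete, here is a rough sketch of how each one shapes a request (payloads simplified; `baseUrl`, `apiKey`, `model`, and `prompt` are placeholders, not project values):

```ts
const baseUrl = 'https://relay.example.com/v1'; // placeholder relay endpoint
const apiKey = 'sk-...';
const model = 'gpt-4o-mini';
const prompt = 'Review this code';

// OpenAI-compatible: Bearer token in the Authorization header.
const openaiReq = fetch(`${baseUrl}/chat/completions`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` },
  body: JSON.stringify({ model, messages: [{ role: 'user', content: prompt }] }),
});

// Gemini: API key as a URL query parameter; "contents" payload.
const geminiReq = fetch(`${baseUrl}/models/${model}:generateContent?key=${apiKey}`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ contents: [{ role: 'user', parts: [{ text: prompt }] }] }),
});

// Claude: x-api-key plus a mandatory anthropic-version header.
const claudeReq = fetch(`${baseUrl}/messages`, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'x-api-key': apiKey,
    'anthropic-version': '2023-06-01',
  },
  body: JSON.stringify({ model, max_tokens: 1024, messages: [{ role: 'user', content: prompt }] }),
});
```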
#### Platform-Specific API Key Configuration (Optional)
| Variable | Description | Special Requirements |
|----------|-------------|---------------------|
| `VITE_GEMINI_API_KEY` | Google Gemini API Key | - |
| `VITE_GEMINI_MODEL` | Gemini model (default: gemini-2.5-flash) | - |
| `VITE_GEMINI_MODEL` | Gemini model (default: gemini-1.5-flash) | - |
| `VITE_OPENAI_API_KEY` | OpenAI API Key | - |
| `VITE_OPENAI_MODEL` | OpenAI model (default: gpt-4o-mini) | - |
| `VITE_OPENAI_BASE_URL` | OpenAI custom endpoint | For relay services |

View File

@@ -40,7 +40,7 @@ const LLM_PROVIDERS = [
// Default model configuration
const DEFAULT_MODELS = {
gemini: 'gemini-2.5-flash',
gemini: 'gemini-1.5-flash',
openai: 'gpt-4o-mini',
claude: 'claude-3-5-sonnet-20241022',
qwen: 'qwen-turbo',
@@ -62,6 +62,7 @@ interface SystemConfigData {
llmTimeout: number;
llmTemperature: number;
llmMaxTokens: number;
llmCustomHeaders: string;
// Platform-specific configuration
geminiApiKey: string;
@@ -97,6 +98,7 @@ export function SystemConfig() {
llmTimeout: 150000,
llmTemperature: 0.2,
llmMaxTokens: 4096,
llmCustomHeaders: '',
geminiApiKey: '',
openaiApiKey: '',
claudeApiKey: '',
@@ -155,6 +157,7 @@ export function SystemConfig() {
llmTimeout: Number(import.meta.env.VITE_LLM_TIMEOUT) || 150000,
llmTemperature: Number(import.meta.env.VITE_LLM_TEMPERATURE) || 0.2,
llmMaxTokens: Number(import.meta.env.VITE_LLM_MAX_TOKENS) || 4096,
llmCustomHeaders: import.meta.env.VITE_LLM_CUSTOM_HEADERS || '',
geminiApiKey: import.meta.env.VITE_GEMINI_API_KEY || '',
openaiApiKey: import.meta.env.VITE_OPENAI_API_KEY || '',
claudeApiKey: import.meta.env.VITE_CLAUDE_API_KEY || '',
@@ -423,6 +426,18 @@ export function SystemConfig() {
/>
</div>
</div>
<div className="space-y-2 pt-4 border-t">
<Label>Custom Headers</Label>
<Input
value={config.llmCustomHeaders}
onChange={(e) => updateConfig('llmCustomHeaders', e.target.value)}
placeholder='{"X-Custom-Header": "value", "Another-Header": "value2"}'
/>
<p className="text-xs text-muted-foreground">
Enter a JSON-format string, e.g. <code className="bg-muted px-1 py-0.5 rounded">&#123;"X-API-Version": "v1"&#125;</code>
</p>
</div>
</CardContent>
</Card>
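Because the field accepts free-form text, the form could reject malformed input before it is persisted. A hedged sketch of such a check; `validateHeadersInput` is hypothetical and not part of this commit:

```ts
// Returns an error message for display, or null when the input is acceptable.
function validateHeadersInput(value: string): string | null {
  if (value.trim() === '') return null; // empty means "no custom headers"
  try {
    const parsed: unknown = JSON.parse(value);
    if (typeof parsed !== 'object' || parsed === null || Array.isArray(parsed)) {
      return 'Custom headers must be a JSON object';
    }
    for (const [key, val] of Object.entries(parsed)) {
      if (typeof val !== 'string') return `Header "${key}" must have a string value`;
    }
    return null;
  } catch {
    return 'Custom headers must be valid JSON';
  }
}
```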

View File

@@ -28,10 +28,12 @@ export const env = {
LLM_TEMPERATURE: runtimeConfig?.llmTemperature !== undefined ? runtimeConfig.llmTemperature : (Number(import.meta.env.VITE_LLM_TEMPERATURE) || 0.2),
// Maximum LLM output tokens
LLM_MAX_TOKENS: runtimeConfig?.llmMaxTokens || Number(import.meta.env.VITE_LLM_MAX_TOKENS) || 4096,
// Custom LLM request headers (JSON string format)
LLM_CUSTOM_HEADERS: runtimeConfig?.llmCustomHeaders || import.meta.env.VITE_LLM_CUSTOM_HEADERS || '',
// ==================== Gemini AI configuration (backward-compatible) ====================
GEMINI_API_KEY: runtimeConfig?.geminiApiKey || import.meta.env.VITE_GEMINI_API_KEY || '',
GEMINI_MODEL: import.meta.env.VITE_GEMINI_MODEL || 'gemini-2.5-flash',
GEMINI_MODEL: import.meta.env.VITE_GEMINI_MODEL || 'gemini-1.5-flash',
GEMINI_TIMEOUT_MS: Number(import.meta.env.VITE_GEMINI_TIMEOUT_MS) || 25000,
// ==================== OpenAI configuration ====================

View File

@@ -47,12 +47,20 @@ export class ClaudeAdapter extends BaseLLMAdapter {
requestBody.system = systemMessage.content;
}
// Build request headers
const headers: Record<string, string> = {
'x-api-key': this.config.apiKey,
'anthropic-version': '2023-06-01',
};
// Merge custom headers
if (this.config.customHeaders) {
Object.assign(headers, this.config.customHeaders);
}
const response = await fetch(`${this.baseUrl}/messages`, {
method: 'POST',
headers: this.buildHeaders({
'x-api-key': this.config.apiKey,
'anthropic-version': '2023-06-01',
}),
headers: this.buildHeaders(headers),
body: JSON.stringify(requestBody),
});
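Note the merge order: `Object.assign` applies `customHeaders` after the defaults, so a user-supplied header with the same key wins. A tiny sketch of that precedence (values illustrative):

```ts
const defaults: Record<string, string> = {
  'x-api-key': 'sk-default',
  'anthropic-version': '2023-06-01',
};
const custom = { 'anthropic-version': '2024-01-01', 'X-Relay-Token': 'abc' };

const merged = Object.assign({}, defaults, custom);
// merged['anthropic-version'] === '2024-01-01' -> the custom value overrides the default
// merged['X-Relay-Token'] === 'abc'            -> new headers are simply added
```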

View File

@@ -27,11 +27,19 @@ export class DeepSeekAdapter extends BaseLLMAdapter {
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
// The DeepSeek API is OpenAI-compatible
// Build request headers
const headers: Record<string, string> = {
'Authorization': `Bearer ${this.config.apiKey}`,
};
// 合并自定义请求头
if (this.config.customHeaders) {
Object.assign(headers, this.config.customHeaders);
}
const response = await fetch(`${this.baseUrl}/v1/chat/completions`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
}),
headers: this.buildHeaders(headers),
body: JSON.stringify({
model: this.config.model,
messages: request.messages,

View File

@@ -27,11 +27,14 @@ export class DoubaoAdapter extends BaseLLMAdapter {
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
// The Doubao API is OpenAI-compatible
const headers: Record<string, string> = {
'Authorization': `Bearer ${this.config.apiKey}`,
};
if (this.config.customHeaders) Object.assign(headers, this.config.customHeaders);
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
}),
headers: this.buildHeaders(headers),
body: JSON.stringify({
model: this.config.model,
messages: request.messages,

View File

@@ -1,17 +1,17 @@
/**
* Google Gemini adapter
* Google Gemini adapter - official API and relay services
*/
import { GoogleGenerativeAI } from '@google/generative-ai';
import { BaseLLMAdapter } from '../base-adapter';
import type { LLMRequest, LLMResponse } from '../types';
export class GeminiAdapter extends BaseLLMAdapter {
private client: GoogleGenerativeAI;
private baseUrl: string;
constructor(config: any) {
super(config);
this.client = new GoogleGenerativeAI(this.config.apiKey);
// Support a custom baseUrl (relay service) or the official API
this.baseUrl = this.config.baseUrl || 'https://generativelanguage.googleapis.com/v1beta';
}
async complete(request: LLMRequest): Promise<LLMResponse> {
@@ -27,16 +27,7 @@ export class GeminiAdapter extends BaseLLMAdapter {
}
private async _generateContent(request: LLMRequest): Promise<LLMResponse> {
const model = this.client.getGenerativeModel({
model: this.config.model,
generationConfig: {
temperature: request.temperature ?? this.config.temperature,
maxOutputTokens: request.maxTokens ?? this.config.maxTokens,
topP: request.topP ?? this.config.topP,
}
});
// Convert the messages to Gemini format
// Convert message format to Gemini format
const contents = request.messages
.filter(msg => msg.role !== 'system')
.map(msg => ({
@@ -44,24 +44,69 @@ export class GeminiAdapter extends BaseLLMAdapter {
parts: [{ text: msg.content }],
}));
// The system message becomes a prefix of the first user message
// Merge the system message into the first user message
const systemMessage = request.messages.find(msg => msg.role === 'system');
if (systemMessage && contents.length > 0) {
contents[0].parts[0].text = `${systemMessage.content}\n\n${contents[0].parts[0].text}`;
}
const result = await model.generateContent({
// Build the request body
const requestBody = {
contents,
safetySettings: [],
generationConfig: {
temperature: request.temperature ?? this.config.temperature,
maxOutputTokens: request.maxTokens ?? this.config.maxTokens,
topP: request.topP ?? this.config.topP,
}
};
// Build request headers
const headers: Record<string, string> = {
'Content-Type': 'application/json',
};
// Merge custom headers if present
if (this.config.customHeaders) {
Object.assign(headers, this.config.customHeaders);
}
// The API key may be passed as a URL parameter or in a request header
const url = `${this.baseUrl}/models/${this.config.model}:generateContent?key=${this.config.apiKey}`;
const response = await fetch(url, {
method: 'POST',
headers: this.buildHeaders(headers),
body: JSON.stringify(requestBody),
});
const response = result.response;
const text = response.text();
if (!response.ok) {
const error = await response.json().catch(() => ({}));
throw {
statusCode: response.status,
message: error.error?.message || `HTTP ${response.status}: ${response.statusText}`,
details: error,
};
}
const data = await response.json();
// Parse the Gemini response format
const candidate = data.candidates?.[0];
if (!candidate || !candidate.content) {
throw new Error('Unexpected API response format: missing candidates or content field');
}
const text = candidate.content.parts?.map((part: any) => part.text).join('') || '';
return {
content: text,
model: this.config.model,
finishReason: 'stop',
usage: data.usageMetadata ? {
promptTokens: data.usageMetadata.promptTokenCount || 0,
completionTokens: data.usageMetadata.candidatesTokenCount || 0,
totalTokens: data.usageMetadata.totalTokenCount || 0,
} : undefined,
finishReason: candidate.finishReason || 'stop',
};
}
@@ -75,4 +111,3 @@ export class GeminiAdapter extends BaseLLMAdapter {
return true;
}
}
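For context, the parsing above targets a REST response of roughly this shape (a hand-written, abbreviated example, not captured output):

```ts
// Abbreviated generateContent response body, expressed as a TS literal.
const exampleResponse = {
  candidates: [
    {
      content: { parts: [{ text: 'Generated answer...' }], role: 'model' },
      finishReason: 'STOP',
    },
  ],
  usageMetadata: {
    promptTokenCount: 120,
    candidatesTokenCount: 84,
    totalTokenCount: 204,
  },
};
```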

View File

@@ -27,11 +27,14 @@ export class MinimaxAdapter extends BaseLLMAdapter {
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
// The MiniMax API is OpenAI-compatible
const headers: Record<string, string> = {
'Authorization': `Bearer ${this.config.apiKey}`,
};
if (this.config.customHeaders) Object.assign(headers, this.config.customHeaders);
const response = await fetch(`${this.baseUrl}/text/chatcompletion_v2`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
}),
headers: this.buildHeaders(headers),
body: JSON.stringify({
model: this.config.model,
messages: request.messages,

View File

@@ -27,11 +27,14 @@ export class MoonshotAdapter extends BaseLLMAdapter {
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
// The Moonshot API is OpenAI-compatible
const headers: Record<string, string> = {
'Authorization': `Bearer ${this.config.apiKey}`,
};
if (this.config.customHeaders) Object.assign(headers, this.config.customHeaders);
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
}),
headers: this.buildHeaders(headers),
body: JSON.stringify({
model: this.config.model,
messages: request.messages,

View File

@@ -37,9 +37,14 @@ export class OllamaAdapter extends BaseLLMAdapter {
headers['Authorization'] = `Bearer ${this.config.apiKey}`;
}
// Merge custom headers
if (this.config.customHeaders) {
Object.assign(headers, this.config.customHeaders);
}
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers,
headers: this.buildHeaders(headers),
body: JSON.stringify({
model: this.config.model,
messages: request.messages,

View File

@@ -26,11 +26,19 @@ export class OpenAIAdapter extends BaseLLMAdapter {
}
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
// Build request headers
const headers: Record<string, string> = {
'Authorization': `Bearer ${this.config.apiKey}`,
};
// Merge custom headers
if (this.config.customHeaders) {
Object.assign(headers, this.config.customHeaders);
}
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
}),
headers: this.buildHeaders(headers),
body: JSON.stringify({
model: this.config.model,
messages: request.messages,

View File

@@ -26,12 +26,15 @@ export class QwenAdapter extends BaseLLMAdapter {
}
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
const headers: Record<string, string> = {
'Authorization': `Bearer ${this.config.apiKey}`,
'X-DashScope-SSE': 'disable',
};
if (this.config.customHeaders) Object.assign(headers, this.config.customHeaders);
const response = await fetch(`${this.baseUrl}/services/aigc/text-generation/generation`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
'X-DashScope-SSE': 'disable',
}),
headers: this.buildHeaders(headers),
body: JSON.stringify({
model: this.config.model,
input: {

View File

@@ -27,11 +27,14 @@ export class ZhipuAdapter extends BaseLLMAdapter {
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
// The Zhipu AI API is OpenAI-compatible
const headers: Record<string, string> = {
'Authorization': `Bearer ${this.config.apiKey}`,
};
if (this.config.customHeaders) Object.assign(headers, this.config.customHeaders);
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
}),
headers: this.buildHeaders(headers),
body: JSON.stringify({
model: this.config.model,
messages: request.messages,

View File

@@ -80,6 +80,16 @@ export class LLMService {
baseUrl = env.OLLAMA_BASE_URL;
}
// Parse custom request headers
let customHeaders: Record<string, string> | undefined;
if (env.LLM_CUSTOM_HEADERS) {
try {
customHeaders = JSON.parse(env.LLM_CUSTOM_HEADERS);
} catch (e) {
console.warn('Invalid LLM_CUSTOM_HEADERS format, should be JSON string');
}
}
const config: LLMConfig = {
provider,
apiKey,
@@ -88,6 +98,7 @@
timeout: env.LLM_TIMEOUT || env.GEMINI_TIMEOUT_MS,
temperature: env.LLM_TEMPERATURE,
maxTokens: env.LLM_MAX_TOKENS,
customHeaders,
};
return new LLMService(config);

View File

@@ -28,6 +28,7 @@ export interface LLMConfig {
topP?: number; // Top-p sampling
frequencyPenalty?: number; // Frequency penalty
presencePenalty?: number; // Presence penalty
customHeaders?: Record<string, string>; // Custom request headers
}
// LLM request message