diff --git a/README.md b/README.md
index 9c9492b..ba9a6d5 100644
--- a/README.md
+++ b/README.md
@@ -189,11 +189,18 @@ VITE_LLM_GAP_MS=500                  # Request interval (ms)
 
 ```env
 VITE_LLM_TIMEOUT=300000              # Increase timeout
-VITE_LLM_BASE_URL=https://your-proxy.com     # Use proxy or relay service
+VITE_LLM_BASE_URL=https://your-proxy.com/v1  # Use proxy or relay service
 VITE_LLM_CONCURRENCY=1               # Reduce concurrency
 VITE_LLM_GAP_MS=1000                 # Increase request interval
 ```
 
+**Custom Headers Example** (for special relay services):
+
+```env
+# JSON format string
+VITE_LLM_CUSTOM_HEADERS='{"X-API-Version":"v1","X-Custom-Auth":"token123"}'
+```
+
 ### FAQ
@@ -581,19 +588,27 @@ pnpm lint
 
 #### Core LLM Configuration
 
 | Variable | Required | Default | Description |
 |--------|------|--------|------|
-| `VITE_LLM_PROVIDER` | ✅ | `gemini` | LLM provider: `gemini`\|`openai`\|`claude`\|`qwen`\|`deepseek`\|`zhipu`\|`moonshot`\|`baidu`\|`minimax`\|`doubao` |
+| `VITE_LLM_PROVIDER` | ✅ | `gemini` | LLM provider: `gemini`\|`openai`\|`claude`\|`qwen`\|`deepseek`\|`zhipu`\|`moonshot`\|`baidu`\|`minimax`\|`doubao`\|`ollama` |
 | `VITE_LLM_API_KEY` | ✅ | - | Universal API Key (higher priority than platform-specific config) |
 | `VITE_LLM_MODEL` | ❌ | Auto | Model name (uses platform default if not specified) |
-| `VITE_LLM_BASE_URL` | ❌ | - | Custom API endpoint (for proxy, relay, or private deployment) |
+| `VITE_LLM_BASE_URL` | ❌ | - | Custom API endpoint (**supports relay services for all platforms**, proxy, or private deployment) |
 | `VITE_LLM_TIMEOUT` | ❌ | `150000` | Request timeout (milliseconds) |
 | `VITE_LLM_TEMPERATURE` | ❌ | `0.2` | Temperature parameter (0.0-2.0), controls output randomness |
 | `VITE_LLM_MAX_TOKENS` | ❌ | `4096` | Maximum output tokens |
+| `VITE_LLM_CUSTOM_HEADERS` | ❌ | - | Custom HTTP headers (JSON string format), for special relay services or self-hosted instances |
+
+> 💡 **API Format Support**: XCodeReviewer supports three mainstream API formats:
+> - **OpenAI-Compatible Format** (most common): works with most relay services and OpenRouter
+> - **Gemini Format**: Google Gemini official and compatible services
+> - **Claude Format**: Anthropic Claude official and compatible services
+>
+> Simply select the corresponding LLM provider and enter the relay service address and key. The custom headers feature covers any additional requirements of special relay services.
 
 #### Platform-Specific API Key Configuration (Optional)
 
 | Variable | Description | Special Requirements |
 |--------|------|---------|
 | `VITE_GEMINI_API_KEY` | Google Gemini API Key | - |
-| `VITE_GEMINI_MODEL` | Gemini model (default: gemini-2.5-flash) | - |
+| `VITE_GEMINI_MODEL` | Gemini model (default: gemini-1.5-flash) | - |
 | `VITE_OPENAI_API_KEY` | OpenAI API Key | - |
 | `VITE_OPENAI_MODEL` | OpenAI model (default: gpt-4o-mini) | - |
 | `VITE_OPENAI_BASE_URL` | OpenAI custom endpoint | For relay services |
diff --git a/README_EN.md b/README_EN.md
index 8dfc3f6..f4f5484 100644
--- a/README_EN.md
+++ b/README_EN.md
@@ -189,11 +189,18 @@ For timeout or connection issues, adjust these parameters:
 
 ```env
 VITE_LLM_TIMEOUT=300000              # Increase timeout
-VITE_LLM_BASE_URL=https://your-proxy.com     # Use proxy or relay service
+VITE_LLM_BASE_URL=https://your-proxy.com/v1  # Use proxy or relay service
 VITE_LLM_CONCURRENCY=1               # Reduce concurrency
 VITE_LLM_GAP_MS=1000                 # Increase request interval
 ```
 
+**Custom Headers Example** (for special relay services):
+
+```env
+# JSON format string
+VITE_LLM_CUSTOM_HEADERS='{"X-API-Version":"v1","X-Custom-Auth":"token123"}'
+```
+
 ### FAQ
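Taken together, the base-URL and custom-header settings above mean every request reduces to a plain `fetch` against the configured endpoint. The sketch below (not part of this patch; endpoint, key, and header values are placeholders) shows the resulting request shape for an OpenAI-compatible relay:

```typescript
// Standalone sketch: what an OpenAI-compatible relay call looks like once
// VITE_LLM_BASE_URL and VITE_LLM_CUSTOM_HEADERS are applied.
const baseUrl = 'https://your-proxy.com/v1';            // VITE_LLM_BASE_URL
const customHeaders = JSON.parse(
  '{"X-API-Version":"v1","X-Custom-Auth":"token123"}'   // VITE_LLM_CUSTOM_HEADERS
);

const response = await fetch(`${baseUrl}/chat/completions`, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer sk-placeholder',           // VITE_LLM_API_KEY
    ...customHeaders,                                   // relay-specific extras
  },
  body: JSON.stringify({
    model: 'gpt-4o-mini',
    messages: [{ role: 'user', content: 'ping' }],
  }),
});
```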
@@ -582,19 +589,27 @@ pnpm lint
 
 #### Core LLM Configuration
 
 | Variable | Required | Default | Description |
 |----------|----------|---------|-------------|
-| `VITE_LLM_PROVIDER` | ✅ | `gemini` | LLM provider: `gemini`\|`openai`\|`claude`\|`qwen`\|`deepseek`\|`zhipu`\|`moonshot`\|`baidu`\|`minimax`\|`doubao` |
+| `VITE_LLM_PROVIDER` | ✅ | `gemini` | LLM provider: `gemini`\|`openai`\|`claude`\|`qwen`\|`deepseek`\|`zhipu`\|`moonshot`\|`baidu`\|`minimax`\|`doubao`\|`ollama` |
 | `VITE_LLM_API_KEY` | ✅ | - | Universal API Key (higher priority than platform-specific config) |
 | `VITE_LLM_MODEL` | ❌ | Auto | Model name (uses platform default if not specified) |
-| `VITE_LLM_BASE_URL` | ❌ | - | Custom API endpoint (for proxy, relay, or private deployment) |
+| `VITE_LLM_BASE_URL` | ❌ | - | Custom API endpoint (**supports relay services for all platforms**, proxy, or private deployment) |
 | `VITE_LLM_TIMEOUT` | ❌ | `150000` | Request timeout (milliseconds) |
 | `VITE_LLM_TEMPERATURE` | ❌ | `0.2` | Temperature parameter (0.0-2.0), controls output randomness |
 | `VITE_LLM_MAX_TOKENS` | ❌ | `4096` | Maximum output tokens |
+| `VITE_LLM_CUSTOM_HEADERS` | ❌ | - | Custom HTTP headers (JSON string format), for special relay services or self-hosted instances |
+
+> 💡 **API Format Support**: XCodeReviewer supports three mainstream API formats:
+> - **OpenAI-Compatible Format** (most common): works with most relay services and OpenRouter
+> - **Gemini Format**: Google Gemini official and compatible services
+> - **Claude Format**: Anthropic Claude official and compatible services
+>
+> Simply select the corresponding LLM provider and enter the relay service address and key. The custom headers feature covers any additional requirements of special relay services.
 
 #### Platform-Specific API Key Configuration (Optional)
 
 | Variable | Description | Special Requirements |
 |----------|-------------|---------------------|
 | `VITE_GEMINI_API_KEY` | Google Gemini API Key | - |
-| `VITE_GEMINI_MODEL` | Gemini model (default: gemini-2.5-flash) | - |
+| `VITE_GEMINI_MODEL` | Gemini model (default: gemini-1.5-flash) | - |
 | `VITE_OPENAI_API_KEY` | OpenAI API Key | - |
 | `VITE_OPENAI_MODEL` | OpenAI model (default: gpt-4o-mini) | - |
 | `VITE_OPENAI_BASE_URL` | OpenAI custom endpoint | For relay services |
diff --git a/src/components/system/SystemConfig.tsx b/src/components/system/SystemConfig.tsx
index 1347c7a..392783f 100644
--- a/src/components/system/SystemConfig.tsx
+++ b/src/components/system/SystemConfig.tsx
@@ -40,7 +40,7 @@ const LLM_PROVIDERS = [
 
 // Default model configuration
 const DEFAULT_MODELS = {
-  gemini: 'gemini-2.5-flash',
+  gemini: 'gemini-1.5-flash',
   openai: 'gpt-4o-mini',
   claude: 'claude-3-5-sonnet-20241022',
   qwen: 'qwen-turbo',
@@ -62,6 +62,7 @@ interface SystemConfigData {
   llmTimeout: number;
   llmTemperature: number;
   llmMaxTokens: number;
+  llmCustomHeaders: string;
 
   // Platform-specific configuration
   geminiApiKey: string;
@@ -97,6 +98,7 @@ export function SystemConfig() {
     llmTimeout: 150000,
     llmTemperature: 0.2,
     llmMaxTokens: 4096,
+    llmCustomHeaders: '',
     geminiApiKey: '',
     openaiApiKey: '',
     claudeApiKey: '',
@@ -155,6 +157,7 @@ export function SystemConfig() {
       llmTimeout: Number(import.meta.env.VITE_LLM_TIMEOUT) || 150000,
       llmTemperature: Number(import.meta.env.VITE_LLM_TEMPERATURE) || 0.2,
       llmMaxTokens: Number(import.meta.env.VITE_LLM_MAX_TOKENS) || 4096,
+      llmCustomHeaders: import.meta.env.VITE_LLM_CUSTOM_HEADERS || '',
       geminiApiKey: import.meta.env.VITE_GEMINI_API_KEY || '',
       openaiApiKey: import.meta.env.VITE_OPENAI_API_KEY || '',
       claudeApiKey: import.meta.env.VITE_CLAUDE_API_KEY || '',
@@ -423,6 +426,18 @@ export function SystemConfig() {
               />
             </div>
+
+            <div className="space-y-2">
+              <Label>Custom Request Headers</Label>
+              <Input
+                value={config.llmCustomHeaders}
+                onChange={(e) => updateConfig('llmCustomHeaders', e.target.value)}
+                placeholder='{"X-Custom-Header": "value", "Another-Header": "value2"}'
+              />
+              <p className="text-sm text-muted-foreground">
+                JSON format, for the special requirements of some relay services or self-hosted instances. Example: {'{"X-API-Version": "v1"}'}
+              </p>
+            </div>
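The `llmCustomHeaders` field above stores the raw JSON string exactly as typed. A small guard along these lines (hypothetical helper, not part of this patch) could reject malformed input before it is saved:

```typescript
// Hypothetical helper: validate the custom-headers field before persisting
// it. Accepts the empty string (no custom headers) or a JSON object whose
// values are all strings, matching what the adapters expect.
function isValidHeadersJson(input: string): boolean {
  if (!input.trim()) return true; // empty means "no custom headers"
  try {
    const parsed = JSON.parse(input);
    return (
      typeof parsed === 'object' &&
      parsed !== null &&
      !Array.isArray(parsed) &&
      Object.values(parsed).every(v => typeof v === 'string')
    );
  } catch {
    return false;
  }
}
```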
diff --git a/src/shared/config/env.ts b/src/shared/config/env.ts
index 8d9488b..c327d8b 100644
--- a/src/shared/config/env.ts
+++ b/src/shared/config/env.ts
@@ -28,10 +28,12 @@ export const env = {
   LLM_TEMPERATURE: runtimeConfig?.llmTemperature !== undefined ? runtimeConfig.llmTemperature : (Number(import.meta.env.VITE_LLM_TEMPERATURE) || 0.2),
   // LLM max output tokens
   LLM_MAX_TOKENS: runtimeConfig?.llmMaxTokens || Number(import.meta.env.VITE_LLM_MAX_TOKENS) || 4096,
+  // LLM custom request headers (JSON string format)
+  LLM_CUSTOM_HEADERS: runtimeConfig?.llmCustomHeaders || import.meta.env.VITE_LLM_CUSTOM_HEADERS || '',
 
   // ==================== Gemini AI configuration (legacy-compatible) ====================
   GEMINI_API_KEY: runtimeConfig?.geminiApiKey || import.meta.env.VITE_GEMINI_API_KEY || '',
-  GEMINI_MODEL: import.meta.env.VITE_GEMINI_MODEL || 'gemini-2.5-flash',
+  GEMINI_MODEL: import.meta.env.VITE_GEMINI_MODEL || 'gemini-1.5-flash',
   GEMINI_TIMEOUT_MS: Number(import.meta.env.VITE_GEMINI_TIMEOUT_MS) || 25000,
 
   // ==================== OpenAI configuration ====================
diff --git a/src/shared/services/llm/adapters/claude-adapter.ts b/src/shared/services/llm/adapters/claude-adapter.ts
index 3de4bd3..ee5c285 100644
--- a/src/shared/services/llm/adapters/claude-adapter.ts
+++ b/src/shared/services/llm/adapters/claude-adapter.ts
@@ -47,12 +47,20 @@ export class ClaudeAdapter extends BaseLLMAdapter {
       requestBody.system = systemMessage.content;
     }
 
+    // Build request headers
+    const headers: Record<string, string> = {
+      'x-api-key': this.config.apiKey,
+      'anthropic-version': '2023-06-01',
+    };
+
+    // Merge custom headers
+    if (this.config.customHeaders) {
+      Object.assign(headers, this.config.customHeaders);
+    }
+
     const response = await fetch(`${this.baseUrl}/messages`, {
       method: 'POST',
-      headers: this.buildHeaders({
-        'x-api-key': this.config.apiKey,
-        'anthropic-version': '2023-06-01',
-      }),
+      headers: this.buildHeaders(headers),
       body: JSON.stringify(requestBody),
     });
 
diff --git a/src/shared/services/llm/adapters/deepseek-adapter.ts b/src/shared/services/llm/adapters/deepseek-adapter.ts
index 6958680..caad305 100644
--- a/src/shared/services/llm/adapters/deepseek-adapter.ts
+++ b/src/shared/services/llm/adapters/deepseek-adapter.ts
@@ -27,11 +27,19 @@ export class DeepSeekAdapter extends BaseLLMAdapter {
 
   private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
     // The DeepSeek API is OpenAI-compatible
+    // Build request headers
+    const headers: Record<string, string> = {
+      'Authorization': `Bearer ${this.config.apiKey}`,
+    };
+
+    // Merge custom headers
+    if (this.config.customHeaders) {
+      Object.assign(headers, this.config.customHeaders);
+    }
+
     const response = await fetch(`${this.baseUrl}/v1/chat/completions`, {
       method: 'POST',
-      headers: this.buildHeaders({
-        'Authorization': `Bearer ${this.config.apiKey}`,
-      }),
+      headers: this.buildHeaders(headers),
       body: JSON.stringify({
         model: this.config.model,
         messages: request.messages,
diff --git a/src/shared/services/llm/adapters/doubao-adapter.ts b/src/shared/services/llm/adapters/doubao-adapter.ts
index ca27f2f..03478d0 100644
--- a/src/shared/services/llm/adapters/doubao-adapter.ts
+++ b/src/shared/services/llm/adapters/doubao-adapter.ts
@@ -27,11 +27,14 @@ export class DoubaoAdapter extends BaseLLMAdapter {
 
   private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
     // The Doubao API is OpenAI-compatible
+    const headers: Record<string, string> = {
+      'Authorization': `Bearer ${this.config.apiKey}`,
+    };
+    if (this.config.customHeaders) Object.assign(headers, this.config.customHeaders);
+
     const response = await fetch(`${this.baseUrl}/chat/completions`, {
       method: 'POST',
-      headers: this.buildHeaders({
-        'Authorization': `Bearer ${this.config.apiKey}`,
-      }),
+      headers: this.buildHeaders(headers),
       body: JSON.stringify({
         model: this.config.model,
         messages: request.messages,
diff --git a/src/shared/services/llm/adapters/gemini-adapter.ts b/src/shared/services/llm/adapters/gemini-adapter.ts
index 7611cbb..b822603 100644
--- a/src/shared/services/llm/adapters/gemini-adapter.ts
+++ b/src/shared/services/llm/adapters/gemini-adapter.ts
@@ -1,17 +1,17 @@
 /**
- * Google Gemini adapter
+ * Google Gemini adapter - supports the official API and relay services
  */
-import { GoogleGenerativeAI } from '@google/generative-ai';
 import { BaseLLMAdapter } from '../base-adapter';
 import type { LLMRequest, LLMResponse } from '../types';
 
 export class GeminiAdapter extends BaseLLMAdapter {
-  private client: GoogleGenerativeAI;
+  private baseUrl: string;
 
   constructor(config: any) {
     super(config);
-    this.client = new GoogleGenerativeAI(this.config.apiKey);
+    // Support a custom baseUrl (relay service) or fall back to the official API
+    this.baseUrl = this.config.baseUrl || 'https://generativelanguage.googleapis.com/v1beta';
   }
 
   async complete(request: LLMRequest): Promise<LLMResponse> {
@@ -27,16 +27,7 @@ export class GeminiAdapter extends BaseLLMAdapter {
   }
 
   private async _generateContent(request: LLMRequest): Promise<LLMResponse> {
-    const model = this.client.getGenerativeModel({
-      model: this.config.model,
-      generationConfig: {
-        temperature: request.temperature ?? this.config.temperature,
-        maxOutputTokens: request.maxTokens ?? this.config.maxTokens,
-        topP: request.topP ?? this.config.topP,
-      }
-    });
-
-    // Convert the messages to Gemini format
+    // Convert the message format to Gemini's
     const contents = request.messages
       .filter(msg => msg.role !== 'system')
       .map(msg => ({
@@ -44,24 +35,69 @@ export class GeminiAdapter extends BaseLLMAdapter {
         parts: [{ text: msg.content }],
       }));
 
-    // Prepend the system message to the first user message
+    // Merge the system message into the first user message
     const systemMessage = request.messages.find(msg => msg.role === 'system');
     if (systemMessage && contents.length > 0) {
       contents[0].parts[0].text = `${systemMessage.content}\n\n${contents[0].parts[0].text}`;
     }
 
-    const result = await model.generateContent({
+    // Build the request body
+    const requestBody = {
       contents,
-      safetySettings: [],
+      generationConfig: {
+        temperature: request.temperature ?? this.config.temperature,
+        maxOutputTokens: request.maxTokens ?? this.config.maxTokens,
+        topP: request.topP ?? this.config.topP,
+      }
+    };
+
+    // Build request headers
+    const headers: Record<string, string> = {
+      'Content-Type': 'application/json',
+    };
+
+    // Merge custom headers if present
+    if (this.config.customHeaders) {
+      Object.assign(headers, this.config.customHeaders);
+    }
+
+    // The API key may go in a URL parameter or a request header
+    const url = `${this.baseUrl}/models/${this.config.model}:generateContent?key=${this.config.apiKey}`;
+
+    const response = await fetch(url, {
+      method: 'POST',
+      headers: this.buildHeaders(headers),
+      body: JSON.stringify(requestBody),
     });
 
-    const response = result.response;
-    const text = response.text();
+    if (!response.ok) {
+      const error = await response.json().catch(() => ({}));
+      throw {
+        statusCode: response.status,
+        message: error.error?.message || `HTTP ${response.status}: ${response.statusText}`,
+        details: error,
+      };
+    }
+
+    const data = await response.json();
+
+    // Parse the Gemini response format
+    const candidate = data.candidates?.[0];
+    if (!candidate || !candidate.content) {
+      throw new Error('Unexpected API response format: missing candidates or content field');
+    }
+
+    const text = candidate.content.parts?.map((part: any) => part.text).join('') || '';
 
     return {
       content: text,
       model: this.config.model,
-      finishReason: 'stop',
+      usage: data.usageMetadata ? {
+        promptTokens: data.usageMetadata.promptTokenCount || 0,
+        completionTokens: data.usageMetadata.candidatesTokenCount || 0,
+        totalTokens: data.usageMetadata.totalTokenCount || 0,
+      } : undefined,
+      finishReason: candidate.finishReason || 'stop',
     };
   }
 
@@ -75,4 +111,3 @@ export class GeminiAdapter extends BaseLLMAdapter {
     return true;
   }
 }
-
diff --git a/src/shared/services/llm/adapters/minimax-adapter.ts b/src/shared/services/llm/adapters/minimax-adapter.ts
index fd9c523..51ba5c4 100644
--- a/src/shared/services/llm/adapters/minimax-adapter.ts
+++ b/src/shared/services/llm/adapters/minimax-adapter.ts
@@ -27,11 +27,14 @@ export class MinimaxAdapter extends BaseLLMAdapter {
 
   private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
     // The MiniMax API is OpenAI-compatible
+    const headers: Record<string, string> = {
+      'Authorization': `Bearer ${this.config.apiKey}`,
+    };
+    if (this.config.customHeaders) Object.assign(headers, this.config.customHeaders);
+
     const response = await fetch(`${this.baseUrl}/text/chatcompletion_v2`, {
       method: 'POST',
-      headers: this.buildHeaders({
-        'Authorization': `Bearer ${this.config.apiKey}`,
-      }),
+      headers: this.buildHeaders(headers),
       body: JSON.stringify({
         model: this.config.model,
         messages: request.messages,
diff --git a/src/shared/services/llm/adapters/moonshot-adapter.ts b/src/shared/services/llm/adapters/moonshot-adapter.ts
index 2bb0c9c..0985188 100644
--- a/src/shared/services/llm/adapters/moonshot-adapter.ts
+++ b/src/shared/services/llm/adapters/moonshot-adapter.ts
@@ -27,11 +27,14 @@ export class MoonshotAdapter extends BaseLLMAdapter {
 
   private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
     // The Moonshot API is OpenAI-compatible
+    const headers: Record<string, string> = {
+      'Authorization': `Bearer ${this.config.apiKey}`,
+    };
+    if (this.config.customHeaders) Object.assign(headers, this.config.customHeaders);
+
     const response = await fetch(`${this.baseUrl}/chat/completions`, {
       method: 'POST',
-      headers: this.buildHeaders({
-        'Authorization': `Bearer ${this.config.apiKey}`,
-      }),
+      headers: this.buildHeaders(headers),
       body: JSON.stringify({
         model: this.config.model,
         messages: request.messages,
diff --git a/src/shared/services/llm/adapters/ollama-adapter.ts b/src/shared/services/llm/adapters/ollama-adapter.ts
index 2eeae87..c4c8e3a 100644
--- a/src/shared/services/llm/adapters/ollama-adapter.ts
+++ b/src/shared/services/llm/adapters/ollama-adapter.ts
@@ -37,9 +37,14 @@ export class OllamaAdapter extends BaseLLMAdapter {
       headers['Authorization'] = `Bearer ${this.config.apiKey}`;
     }
 
+    // Merge custom headers
+    if (this.config.customHeaders) {
+      Object.assign(headers, this.config.customHeaders);
+    }
+
     const response = await fetch(`${this.baseUrl}/chat/completions`, {
       method: 'POST',
-      headers,
+      headers: this.buildHeaders(headers),
       body: JSON.stringify({
         model: this.config.model,
         messages: request.messages,
diff --git a/src/shared/services/llm/adapters/openai-adapter.ts b/src/shared/services/llm/adapters/openai-adapter.ts
index d56ea5e..56bebe4 100644
--- a/src/shared/services/llm/adapters/openai-adapter.ts
+++ b/src/shared/services/llm/adapters/openai-adapter.ts
@@ -26,11 +26,19 @@ export class OpenAIAdapter extends BaseLLMAdapter {
   }
 
   private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
+    // Build request headers
+    const headers: Record<string, string> = {
+      'Authorization': `Bearer ${this.config.apiKey}`,
+    };
+
+    // Merge custom headers
+    if (this.config.customHeaders) {
+      Object.assign(headers, this.config.customHeaders);
+    }
+
     const response = await fetch(`${this.baseUrl}/chat/completions`, {
       method: 'POST',
-      headers: this.buildHeaders({
-        'Authorization': `Bearer ${this.config.apiKey}`,
-      }),
+      headers: this.buildHeaders(headers),
       body: JSON.stringify({
         model: this.config.model,
         messages: request.messages,
diff --git a/src/shared/services/llm/adapters/qwen-adapter.ts b/src/shared/services/llm/adapters/qwen-adapter.ts
index e32fd41..639346d 100644
--- a/src/shared/services/llm/adapters/qwen-adapter.ts
+++ b/src/shared/services/llm/adapters/qwen-adapter.ts
@@ -26,12 +26,15 @@ export class QwenAdapter extends BaseLLMAdapter {
   }
 
   private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
+    const headers: Record<string, string> = {
+      'Authorization': `Bearer ${this.config.apiKey}`,
+      'X-DashScope-SSE': 'disable',
+    };
+    if (this.config.customHeaders) Object.assign(headers, this.config.customHeaders);
+
     const response = await fetch(`${this.baseUrl}/services/aigc/text-generation/generation`, {
       method: 'POST',
-      headers: this.buildHeaders({
-        'Authorization': `Bearer ${this.config.apiKey}`,
-        'X-DashScope-SSE': 'disable',
-      }),
+      headers: this.buildHeaders(headers),
       body: JSON.stringify({
         model: this.config.model,
         input: {
diff --git a/src/shared/services/llm/adapters/zhipu-adapter.ts b/src/shared/services/llm/adapters/zhipu-adapter.ts
index 6b98f17..4152385 100644
--- a/src/shared/services/llm/adapters/zhipu-adapter.ts
+++ b/src/shared/services/llm/adapters/zhipu-adapter.ts
@@ -27,11 +27,14 @@ export class ZhipuAdapter extends BaseLLMAdapter {
 
   private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
     // The Zhipu AI API is OpenAI-compatible
+    const headers: Record<string, string> = {
+      'Authorization': `Bearer ${this.config.apiKey}`,
+    };
+    if (this.config.customHeaders) Object.assign(headers, this.config.customHeaders);
+
     const response = await fetch(`${this.baseUrl}/chat/completions`, {
       method: 'POST',
-      headers: this.buildHeaders({
-        'Authorization': `Bearer ${this.config.apiKey}`,
-      }),
+      headers: this.buildHeaders(headers),
       body: JSON.stringify({
         model: this.config.model,
         messages: request.messages,
diff --git a/src/shared/services/llm/llm-service.ts b/src/shared/services/llm/llm-service.ts
index e4ba19d..ac4f765 100644
--- a/src/shared/services/llm/llm-service.ts
+++ b/src/shared/services/llm/llm-service.ts
@@ -80,6 +80,16 @@ export class LLMService {
       baseUrl = env.OLLAMA_BASE_URL;
     }
 
+    // Parse custom request headers
+    let customHeaders: Record<string, string> | undefined;
+    if (env.LLM_CUSTOM_HEADERS) {
+      try {
+        customHeaders = JSON.parse(env.LLM_CUSTOM_HEADERS);
+      } catch (e) {
+        console.warn('Invalid LLM_CUSTOM_HEADERS format, should be a JSON string');
+      }
+    }
+
     const config: LLMConfig = {
       provider,
       apiKey,
@@ -88,6 +98,7 @@ export class LLMService {
       timeout: env.LLM_TIMEOUT || env.GEMINI_TIMEOUT_MS,
       temperature: env.LLM_TEMPERATURE,
       maxTokens: env.LLM_MAX_TOKENS,
+      customHeaders,
     };
 
     return new LLMService(config);
diff --git a/src/shared/services/llm/types.ts b/src/shared/services/llm/types.ts
index 9430b20..d4291de 100644
--- a/src/shared/services/llm/types.ts
+++ b/src/shared/services/llm/types.ts
@@ -28,6 +28,7 @@ export interface LLMConfig {
   topP?: number;                       // Top-p sampling
   frequencyPenalty?: number;           // Frequency penalty
   presencePenalty?: number;            // Presence penalty
+  customHeaders?: Record<string, string>;  // Custom request headers
 }
 
 // LLM request message
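One design consequence of the shared pattern above: because every adapter merges `customHeaders` last via `Object.assign`, a custom header with the same name silently overrides the adapter's default (for example `anthropic-version` in the Claude adapter). A minimal demonstration of that precedence, with placeholder values:

```typescript
// Sketch of the merge precedence used by the adapters in this patch:
// defaults are set first, customHeaders are assigned last, so custom
// values win on any name conflict.
const defaults: Record<string, string> = {
  'x-api-key': 'sk-default',
  'anthropic-version': '2023-06-01',
};
const customHeaders: Record<string, string> = {
  'anthropic-version': '2024-01-01', // overrides the adapter default
  'X-Relay-Token': 'abc',            // added alongside the defaults
};

const headers = { ...defaults };
Object.assign(headers, customHeaders);
console.log(headers['anthropic-version']); // "2024-01-01"
```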