Refactor LLM configuration and add support for multiple providers in environment setup. Update README and example files to reflect new configuration options and usage instructions. Introduce LLM service architecture with adapters for various AI providers.

This commit is contained in:
lintsinghua 2025-10-24 10:59:05 +08:00
parent 306a19ec33
commit 363608a836
22 changed files with 2258 additions and 118 deletions

View File

@ -1,19 +1,104 @@
# Google Gemini AI configuration (required)
# ========================================
# XCodeReviewer environment variable configuration example
# ========================================
# Copy this file to .env and fill in your own values
# ==================== Universal LLM configuration ====================
# Choose the LLM provider to use (gemini|openai|claude|qwen|deepseek|zhipu|moonshot|baidu|minimax|doubao)
VITE_LLM_PROVIDER=gemini
# Universal LLM settings (optional; if set, these override the platform-specific settings below)
# VITE_LLM_API_KEY=your_api_key_here
# VITE_LLM_MODEL=your_model_name
# VITE_LLM_BASE_URL=https://custom-api-endpoint.com
# VITE_LLM_TIMEOUT=30000
# VITE_LLM_TEMPERATURE=0.2
# VITE_LLM_MAX_TOKENS=4096
# ==================== Google Gemini configuration ====================
# Get an API Key: https://makersuite.google.com/app/apikey
VITE_GEMINI_API_KEY=your_gemini_api_key_here
VITE_GEMINI_MODEL=gemini-2.5-flash
VITE_GEMINI_TIMEOUT_MS=25000
# Supabase configuration (optional, for data persistence)
VITE_SUPABASE_URL=https://your-project.supabase.co
VITE_SUPABASE_ANON_KEY=your-anon-key-here
# ==================== OpenAI configuration ====================
# Get an API Key: https://platform.openai.com/api-keys
# VITE_OPENAI_API_KEY=your_openai_api_key_here
# VITE_OPENAI_MODEL=gpt-4o-mini
# VITE_OPENAI_BASE_URL=https://api.openai.com/v1
# GitHub integration (optional, for repository analysis)
VITE_GITHUB_TOKEN=your_github_token_here
# ==================== Anthropic Claude configuration ====================
# Get an API Key: https://console.anthropic.com/
# VITE_CLAUDE_API_KEY=your_claude_api_key_here
# VITE_CLAUDE_MODEL=claude-3-5-sonnet-20241022
# Application configuration
# ==================== Alibaba Cloud Qwen configuration ====================
# Get an API Key: https://dashscope.console.aliyun.com/
# VITE_QWEN_API_KEY=your_qwen_api_key_here
# VITE_QWEN_MODEL=qwen-turbo
# ==================== DeepSeek configuration ====================
# Get an API Key: https://platform.deepseek.com/
# VITE_DEEPSEEK_API_KEY=your_deepseek_api_key_here
# VITE_DEEPSEEK_MODEL=deepseek-chat
# ==================== Zhipu AI (GLM) configuration ====================
# Get an API Key: https://open.bigmodel.cn/
# VITE_ZHIPU_API_KEY=your_zhipu_api_key_here
# VITE_ZHIPU_MODEL=glm-4-flash
# ==================== Moonshot Kimi configuration ====================
# Get an API Key: https://platform.moonshot.cn/
# VITE_MOONSHOT_API_KEY=your_moonshot_api_key_here
# VITE_MOONSHOT_MODEL=moonshot-v1-8k
# ==================== Baidu ERNIE configuration ====================
# Get an API Key: https://console.bce.baidu.com/qianfan/
# Note: the Baidu API Key format is "API_KEY:SECRET_KEY"
# VITE_BAIDU_API_KEY=your_api_key:your_secret_key
# VITE_BAIDU_MODEL=ERNIE-3.5-8K
# ==================== MiniMax configuration ====================
# Get an API Key: https://www.minimaxi.com/
# VITE_MINIMAX_API_KEY=your_minimax_api_key_here
# VITE_MINIMAX_MODEL=abab6.5-chat
# ==================== ByteDance Doubao configuration ====================
# Get an API Key: https://console.volcengine.com/ark
# Note: Doubao uses an endpoint ID; create an inference endpoint first
# VITE_DOUBAO_API_KEY=your_doubao_api_key_here
# VITE_DOUBAO_MODEL=ep-xxxxx-xxxxx # your endpoint ID
# ==================== Supabase database configuration (optional) ====================
# If not configured, the app runs in demo mode and data is not persisted
# Get credentials: https://supabase.com/
# VITE_SUPABASE_URL=https://your-project.supabase.co
# VITE_SUPABASE_ANON_KEY=your-anon-key-here
# ==================== GitHub integration configuration (optional) ====================
# Used for the repository analysis feature
# Get a token: https://github.com/settings/tokens
# VITE_GITHUB_TOKEN=your_github_token_here
# ==================== Application configuration ====================
VITE_APP_ID=xcodereviewer
# Analysis configuration
# ==================== Code analysis configuration ====================
VITE_MAX_ANALYZE_FILES=40
VITE_LLM_CONCURRENCY=2
VITE_LLM_GAP_MS=500

1
.gitignore vendored
View File

@ -16,6 +16,7 @@ build/
.env.development.local
.env.test.local
.env.production.local
.env.me
# IDE and editor files
.vscode/

253
README.md
View File

@ -62,7 +62,13 @@
2. **Configure environment variables**
```bash
cp .env.example .env
# Edit the .env file; at minimum, configure VITE_GEMINI_API_KEY
# Edit the .env file to configure the LLM provider and API Key
# Option 1: universal configuration (recommended)
# VITE_LLM_PROVIDER=gemini
# VITE_LLM_API_KEY=your_api_key
#
# Option 2: platform-specific configuration
# VITE_GEMINI_API_KEY=your_gemini_api_key
```
3. **Build and start**
@ -113,10 +119,16 @@
Edit the `.env` file and configure the necessary environment variables:
```env
# Google Gemini AI configuration (required)
# Universal LLM configuration (recommended)
VITE_LLM_PROVIDER=gemini           # Choose a provider (gemini|openai|claude|qwen|deepseek, etc.)
VITE_LLM_API_KEY=your_api_key_here # API Key for that provider
VITE_LLM_MODEL=gemini-2.5-flash    # Model name (optional)
# Or use platform-specific configuration
VITE_GEMINI_API_KEY=your_gemini_api_key_here
VITE_GEMINI_MODEL=gemini-2.5-flash
VITE_GEMINI_TIMEOUT_MS=25000
VITE_OPENAI_API_KEY=your_openai_api_key_here
VITE_CLAUDE_API_KEY=your_claude_api_key_here
# ... 10+ mainstream platforms supported
# Supabase configuration (optional, for data persistence)
VITE_SUPABASE_URL=https://your-project.supabase.co
@ -142,12 +154,165 @@
5. **Access the application**
Open `http://localhost:5173` in your browser
#### ⚙️ Advanced configuration (optional)
If you run into timeouts or connection problems, adjust the following settings:
```env
# Increase the timeout (default 150000 ms)
VITE_LLM_TIMEOUT=150000
# Use a custom API endpoint (for proxies or private deployments)
VITE_LLM_BASE_URL=https://your-proxy-url.com
# Lower concurrency and increase the request gap (to avoid rate limits)
VITE_LLM_CONCURRENCY=1
VITE_LLM_GAP_MS=1000
```
#### 🔧 FAQ
<details>
<summary><b>Q: How do I quickly switch LLM platforms?</b></summary>
Just change the value of `VITE_LLM_PROVIDER`:
```env
# Switch to OpenAI
VITE_LLM_PROVIDER=openai
VITE_OPENAI_API_KEY=your_openai_key
# Switch to Claude
VITE_LLM_PROVIDER=claude
VITE_CLAUDE_API_KEY=your_claude_key
# Switch to Qwen
VITE_LLM_PROVIDER=qwen
VITE_QWEN_API_KEY=your_qwen_key
```
</details>
<details>
<summary><b>Q: What should I do about "request timeout" errors?</b></summary>
1. **Increase the timeout**: set `VITE_LLM_TIMEOUT=300000` in `.env`
2. **Check your network connection**: make sure the corresponding API endpoint is reachable
3. **Use a proxy**: if the API is blocked, point `VITE_LLM_BASE_URL` at a proxy
4. **Switch platforms**: try another LLM provider, such as DeepSeek (fast to reach from mainland China)
</details>
<details>
<summary><b>Q: How do I use a Chinese platform to avoid network issues?</b></summary>
Chinese platforms are recommended for faster access:
```env
# Use Qwen (recommended)
VITE_LLM_PROVIDER=qwen
VITE_QWEN_API_KEY=your_qwen_key
# Or use DeepSeek (cost-effective)
VITE_LLM_PROVIDER=deepseek
VITE_DEEPSEEK_API_KEY=your_deepseek_key
# Or use Zhipu AI
VITE_LLM_PROVIDER=zhipu
VITE_ZHIPU_API_KEY=your_zhipu_key
```
</details>
<details>
<summary><b>Q: What is the API Key format for Baidu ERNIE?</b></summary>
Baidu's API Key format is special: provide both the API Key and the Secret Key, separated by a colon:
```env
VITE_LLM_PROVIDER=baidu
VITE_BAIDU_API_KEY=your_api_key:your_secret_key
VITE_BAIDU_MODEL=ERNIE-3.5-8K
```
Both keys can be obtained from the [Baidu Qianfan platform](https://console.bce.baidu.com/qianfan/).
</details>
<details>
<summary><b>Q: How do I configure a proxy or relay service?</b></summary>
Use `VITE_LLM_BASE_URL` to set a custom endpoint:
```env
# OpenAI relay example
VITE_LLM_PROVIDER=openai
VITE_OPENAI_API_KEY=your_key
VITE_OPENAI_BASE_URL=https://api.your-proxy.com/v1
# Or use the universal configuration
VITE_LLM_PROVIDER=openai
VITE_LLM_API_KEY=your_key
VITE_LLM_BASE_URL=https://api.your-proxy.com/v1
```
</details>
<details>
<summary><b>Q: How do I configure several platforms at once and switch quickly?</b></summary>
Configure every platform's key in `.env`, then switch by changing `VITE_LLM_PROVIDER`:
```env
# Currently active platform
VITE_LLM_PROVIDER=gemini
# Pre-configure all platforms
VITE_GEMINI_API_KEY=gemini_key
VITE_OPENAI_API_KEY=openai_key
VITE_CLAUDE_API_KEY=claude_key
VITE_QWEN_API_KEY=qwen_key
VITE_DEEPSEEK_API_KEY=deepseek_key
# To switch, just change the provider value on the first line
```
</details>
### 🔑 Getting API Keys
#### Google Gemini API Key (support for more mainstream platform APIs is planned)
1. Visit [Google AI Studio](https://makersuite.google.com/app/apikey)
2. Create a new API Key
3. Add the API Key to `VITE_GEMINI_API_KEY` in your `.env` file
#### 🎯 Supported LLM platforms
XCodeReviewer now supports multiple mainstream LLM platforms; choose freely based on your needs:
**International platforms:**
- **Google Gemini** - Recommended for code analysis, generous free quota [Get API Key](https://makersuite.google.com/app/apikey)
- **OpenAI GPT** - Stable and reliable, best overall performance [Get API Key](https://platform.openai.com/api-keys)
- **Anthropic Claude** - Strong code understanding [Get API Key](https://console.anthropic.com/)
- **DeepSeek** - Cost-effective [Get API Key](https://platform.deepseek.com/)
**Chinese platforms:**
- **Alibaba Qwen (通义千问)** [Get API Key](https://dashscope.console.aliyun.com/)
- **Zhipu AI (GLM)** [Get API Key](https://open.bigmodel.cn/)
- **Moonshot (Kimi)** [Get API Key](https://platform.moonshot.cn/)
- **Baidu ERNIE (文心一言)** [Get API Key](https://console.bce.baidu.com/qianfan/)
- **MiniMax** [Get API Key](https://www.minimaxi.com/)
- **ByteDance Doubao (豆包)** [Get API Key](https://console.volcengine.com/ark)
#### 📝 Configuration examples
Configure your chosen platform in the `.env` file:
```env
# Option 1: universal configuration (recommended)
VITE_LLM_PROVIDER=gemini          # Choose a provider
VITE_LLM_API_KEY=your_api_key     # API Key for that provider
VITE_LLM_MODEL=gemini-2.5-flash   # Model name (optional)
# Option 2: platform-specific configuration
VITE_GEMINI_API_KEY=your_gemini_api_key
VITE_OPENAI_API_KEY=your_openai_api_key
VITE_CLAUDE_API_KEY=your_claude_api_key
# ... other platform settings
```
**Quick platform switching:** just change the value of `VITE_LLM_PROVIDER` to move between platforms!
> 💡 **Tip:** see the `.env.example` file for detailed configuration notes
#### Supabase configuration (optional)
1. Visit [Supabase](https://supabase.com/) and create a new project
@ -180,7 +345,7 @@
<details>
<summary><b>🧠 Intelligent Auditing</b></summary>
- **Deep AI code understanding**: based on Google Gemini (more mainstream platform APIs were planned), providing intelligent analysis beyond keyword matching.
- **Deep AI code understanding**: supports multiple mainstream LLM platforms (Gemini, OpenAI, Claude, Qwen, DeepSeek, etc.), providing intelligent analysis beyond keyword matching.
- **Five core detection dimensions**:
- 🐛 **Potential bugs**: precisely capture logic errors, boundary conditions, null-pointer issues, and more.
- 🔒 **Security vulnerabilities**: identify SQL injection, XSS, sensitive-information leakage, and other security risks.
@ -215,7 +380,7 @@
| **Data Visualization** | `Recharts` | Professional chart library supporting multiple chart types |
| **Routing** | `React Router v6` | Single-page application routing solution |
| **State Management** | `React Hooks` `Sonner` | Lightweight state management and notification system |
| **AI Engine** | `Google Gemini 2.5 Flash` (more mainstream platform APIs were planned) | Powerful large language model supporting code analysis |
| **AI Engine** | `Multi-Platform LLM` | Supports 10+ mainstream platforms including Gemini, OpenAI, Claude, Qwen, and DeepSeek |
| **Backend Service** | `Supabase` `PostgreSQL` | Full-stack backend-as-a-service with real-time database |
| **HTTP Client** | `Axios` `Ky` | Modern HTTP request libraries |
| **Code Quality** | `Biome` `Ast-grep` `TypeScript` | Code formatting, static analysis, and type checking |
@ -301,14 +466,67 @@ pnpm lint
```
### Environment Variables
#### Core LLM Configuration
| Variable | Required | Default | Description |
|--------|------|--------|------|
| `VITE_LLM_PROVIDER` | ✅ | `gemini` | LLM provider: `gemini`\|`openai`\|`claude`\|`qwen`\|`deepseek`\|`zhipu`\|`moonshot`\|`baidu`\|`minimax`\|`doubao` |
| `VITE_LLM_API_KEY` | ✅ | - | Universal API Key (takes precedence over platform-specific settings) |
| `VITE_LLM_MODEL` | ❌ | Auto | Model name (falls back to each platform's default if unset) |
| `VITE_LLM_BASE_URL` | ❌ | - | Custom API endpoint (for proxies, relays, or private deployments) |
| `VITE_LLM_TIMEOUT` | ❌ | `150000` | Request timeout (milliseconds) |
| `VITE_LLM_TEMPERATURE` | ❌ | `0.2` | Temperature (0.0-2.0), controls output randomness |
| `VITE_LLM_MAX_TOKENS` | ❌ | `4096` | Maximum output tokens |
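For example, when both a universal key and a platform-specific key are set, the universal key wins (this matches the lookup order in `getCurrentLLMApiKey`, shown later in this commit):
```env
VITE_LLM_PROVIDER=openai
VITE_OPENAI_API_KEY=platform_key   # ignored while VITE_LLM_API_KEY is set
VITE_LLM_API_KEY=universal_key     # takes precedence
```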
#### Platform-Specific API Key Configuration (Optional)
| Variable | Description | Special Requirements |
|--------|------|---------|
| `VITE_GEMINI_API_KEY` | Google Gemini API Key | - |
| `VITE_GEMINI_MODEL` | Gemini model (default: gemini-2.5-flash) | - |
| `VITE_OPENAI_API_KEY` | OpenAI API Key | - |
| `VITE_OPENAI_MODEL` | OpenAI model (default: gpt-4o-mini) | - |
| `VITE_OPENAI_BASE_URL` | OpenAI custom endpoint | For relay services |
| `VITE_CLAUDE_API_KEY` | Anthropic Claude API Key | - |
| `VITE_CLAUDE_MODEL` | Claude model (default: claude-3-5-sonnet-20241022) | - |
| `VITE_QWEN_API_KEY` | Alibaba Qwen API Key | - |
| `VITE_QWEN_MODEL` | Qwen model (default: qwen-turbo) | - |
| `VITE_DEEPSEEK_API_KEY` | DeepSeek API Key | - |
| `VITE_DEEPSEEK_MODEL` | DeepSeek model (default: deepseek-chat) | - |
| `VITE_ZHIPU_API_KEY` | Zhipu AI API Key | - |
| `VITE_ZHIPU_MODEL` | Zhipu model (default: glm-4-flash) | - |
| `VITE_MOONSHOT_API_KEY` | Moonshot Kimi API Key | - |
| `VITE_MOONSHOT_MODEL` | Kimi model (default: moonshot-v1-8k) | - |
| `VITE_BAIDU_API_KEY` | Baidu ERNIE API Key | ⚠️ Format: `API_KEY:SECRET_KEY` |
| `VITE_BAIDU_MODEL` | ERNIE model (default: ERNIE-3.5-8K) | - |
| `VITE_MINIMAX_API_KEY` | MiniMax API Key | - |
| `VITE_MINIMAX_MODEL` | MiniMax model (default: abab6.5-chat) | - |
| `VITE_DOUBAO_API_KEY` | ByteDance Doubao API Key | - |
| `VITE_DOUBAO_MODEL` | Doubao model (default: doubao-pro-32k) | - |
#### Database Configuration (Optional)
| Variable | Required | Description |
|--------|------|------|
| `VITE_GEMINI_API_KEY` | ✅ | Google Gemini API key |
| `VITE_GEMINI_MODEL` | ❌ | AI model name (default: gemini-2.5-flash) |
| `VITE_GEMINI_TIMEOUT_MS` | ❌ | Request timeout (default: 25000 ms) |
| `VITE_SUPABASE_URL` | ❌ | Supabase project URL |
| `VITE_SUPABASE_ANON_KEY` | ❌ | Supabase anonymous key |
| `VITE_APP_ID` | ❌ | Application identifier (default: xcodereviewer) |
| `VITE_SUPABASE_URL` | ❌ | Supabase project URL (for data persistence) |
| `VITE_SUPABASE_ANON_KEY` | ❌ | Supabase anonymous key |
> 💡 **Note**: without Supabase configured, the app runs in demo mode and data is not persisted
#### GitHub Integration Configuration (Optional)
| Variable | Required | Description |
|--------|------|------|
| `VITE_GITHUB_TOKEN` | ❌ | GitHub Personal Access Token (for the repository analysis feature) |
#### Analysis Behavior Configuration
| Variable | Default | Description |
|--------|--------|------|
| `VITE_MAX_ANALYZE_FILES` | `40` | Maximum files per analysis run |
| `VITE_LLM_CONCURRENCY` | `2` | Concurrent LLM requests (lower to avoid rate limits) |
| `VITE_LLM_GAP_MS` | `500` | Gap between LLM requests in milliseconds (raise to avoid rate limits) |
#### Application Configuration
| Variable | Default | Description |
|--------|--------|------|
| `VITE_APP_ID` | `xcodereviewer` | Application identifier |
## 🤝 Contributing
@ -341,7 +559,8 @@ pnpm lint
Currently, XCodeReviewer is at the rapid prototype verification stage and its features still need to mature. Based on how the project evolves and on community suggestions, the future development plan is as follows (to be implemented as soon as possible):
- **Multi-platform/local model support**: planned API integrations for major mainstream models at home and abroad, such as OpenAI, Claude, and Qwen, plus support for calling local large models to meet data privacy requirements
- **✅ Multi-platform LLM support**: implemented API integration for 10+ mainstream platforms (Gemini, OpenAI, Claude, Qwen, DeepSeek, Zhipu AI, Kimi, ERNIE, MiniMax, Doubao), with free user configuration and switching
- **Local model support**: planned support for calling local large models (such as Ollama) to meet data privacy requirements
- **Multi-Agent Collaboration**: considering a multi-agent collaboration architecture with `Agent + human dialogue` feedback, including multi-round dialogue display and human interruption/intervention, for a clearer, more transparent, and supervised auditing process that improves audit quality
- **Professional report generation**: generate professional audit reports in the formats different needs require, with customizable report formats and more
- **Custom audit standards**: different teams have their own coding conventions and different projects have specific security requirements, which is exactly where this project is headed next. The current version is still a "semi-black-box": prompt engineering guides the analysis direction and defines the audit standards, while the actual results are determined by the built-in knowledge of powerful pre-trained AI models. Future work will combine reinforcement learning and supervised fine-tuning to support custom rule configuration, team-specific rules defined in YAML or JSON, best-practice templates for common frameworks, and more, producing audit results that better match requirements and standards

View File

@ -62,7 +62,13 @@ Deploy quickly using Docker without Node.js environment setup.
2. **Configure environment variables**
```bash
cp .env.example .env
# Edit .env file and set at least VITE_GEMINI_API_KEY
# Edit .env file and configure LLM provider and API Key
# Method 1: Using Universal Configuration (Recommended)
# VITE_LLM_PROVIDER=gemini
# VITE_LLM_API_KEY=your_api_key
#
# Method 2: Using Platform-Specific Configuration
# VITE_GEMINI_API_KEY=your_gemini_api_key
```
3. **Build and start**
@ -120,10 +126,16 @@ For development or custom modifications, use local deployment.
Edit the `.env` file and configure the necessary environment variables:
```env
# Google Gemini AI Configuration (Required)
# LLM Universal Configuration (Recommended)
VITE_LLM_PROVIDER=gemini # Choose provider (gemini|openai|claude|qwen|deepseek, etc.)
VITE_LLM_API_KEY=your_api_key_here # Corresponding API Key
VITE_LLM_MODEL=gemini-2.5-flash # Model name (optional)
# Or use platform-specific configuration
VITE_GEMINI_API_KEY=your_gemini_api_key_here
VITE_GEMINI_MODEL=gemini-2.5-flash
VITE_GEMINI_TIMEOUT_MS=25000
VITE_OPENAI_API_KEY=your_openai_api_key_here
VITE_CLAUDE_API_KEY=your_claude_api_key_here
# ... Supports 10+ mainstream platforms
# Supabase Configuration (Optional, for data persistence)
VITE_SUPABASE_URL=https://your-project.supabase.co
@ -149,12 +161,166 @@ For development or custom modifications, use local deployment.
5. **Access the application**
Open `http://localhost:5174` in your browser
#### ⚙️ Advanced Configuration (Optional)
If you encounter timeout or connection issues, adjust these settings:
```env
# Increase timeout (default 150000ms)
VITE_LLM_TIMEOUT=150000
# Use custom API endpoint (for proxy or private deployment)
VITE_LLM_BASE_URL=https://your-proxy-url.com
# Reduce concurrency and increase request gap (to avoid rate limiting)
VITE_LLM_CONCURRENCY=1
VITE_LLM_GAP_MS=1000
```
#### 🔧 FAQ
<details>
<summary><b>Q: How to quickly switch between LLM platforms?</b></summary>
Simply modify the `VITE_LLM_PROVIDER` value:
```env
# Switch to OpenAI
VITE_LLM_PROVIDER=openai
VITE_OPENAI_API_KEY=your_openai_key
# Switch to Claude
VITE_LLM_PROVIDER=claude
VITE_CLAUDE_API_KEY=your_claude_key
# Switch to Qwen
VITE_LLM_PROVIDER=qwen
VITE_QWEN_API_KEY=your_qwen_key
```
</details>
<details>
<summary><b>Q: What to do when encountering "Request Timeout" error?</b></summary>
1. **Increase timeout**: Set `VITE_LLM_TIMEOUT=300000` in `.env` (5 minutes)
2. **Check network connection**: Ensure you can access the API endpoint
3. **Use proxy**: Configure `VITE_LLM_BASE_URL` if API is blocked
4. **Switch platform**: Try other LLM providers, such as DeepSeek (good for China)
</details>
<details>
<summary><b>Q: How to use Chinese platforms to avoid network issues?</b></summary>
Recommended Chinese platforms for faster access:
```env
# Use Qwen (Recommended)
VITE_LLM_PROVIDER=qwen
VITE_QWEN_API_KEY=your_qwen_key
# Or use DeepSeek (Cost-effective)
VITE_LLM_PROVIDER=deepseek
VITE_DEEPSEEK_API_KEY=your_deepseek_key
# Or use Zhipu AI
VITE_LLM_PROVIDER=zhipu
VITE_ZHIPU_API_KEY=your_zhipu_key
```
</details>
<details>
<summary><b>Q: What's the API Key format for Baidu ERNIE?</b></summary>
Baidu's API Key requires both the API Key and the Secret Key, separated by a colon:
```env
VITE_LLM_PROVIDER=baidu
VITE_BAIDU_API_KEY=your_api_key:your_secret_key
VITE_BAIDU_MODEL=ERNIE-3.5-8K
```
Get API Key and Secret Key from [Baidu Qianfan Platform](https://console.bce.baidu.com/qianfan/).
</details>
<details>
<summary><b>Q: How to configure proxy or relay service?</b></summary>
Use `VITE_LLM_BASE_URL` to configure custom endpoint:
```env
# OpenAI relay example
VITE_LLM_PROVIDER=openai
VITE_OPENAI_API_KEY=your_key
VITE_OPENAI_BASE_URL=https://api.your-proxy.com/v1
# Or use universal config
VITE_LLM_PROVIDER=openai
VITE_LLM_API_KEY=your_key
VITE_LLM_BASE_URL=https://api.your-proxy.com/v1
```
</details>
<details>
<summary><b>Q: How to configure multiple platforms and switch quickly?</b></summary>
Configure all platform keys in `.env`, then switch by modifying `VITE_LLM_PROVIDER`:
```env
# Currently active platform
VITE_LLM_PROVIDER=gemini
# Pre-configure all platforms
VITE_GEMINI_API_KEY=gemini_key
VITE_OPENAI_API_KEY=openai_key
VITE_CLAUDE_API_KEY=claude_key
VITE_QWEN_API_KEY=qwen_key
VITE_DEEPSEEK_API_KEY=deepseek_key
# Just modify the first line's provider value to switch
```
</details>
### 🔑 Getting API Keys
#### Google Gemini API Key (support for more mainstream platform APIs is planned)
1. Visit [Google AI Studio](https://makersuite.google.com/app/apikey)
2. Create a new API Key
3. Add the API Key to `VITE_GEMINI_API_KEY` in your `.env` file
#### 🎯 Supported LLM Platforms
XCodeReviewer now supports multiple mainstream LLM platforms. You can choose freely based on your needs:
**International Platforms:**
- **Google Gemini** - Recommended for code analysis, generous free tier [Get API Key](https://makersuite.google.com/app/apikey)
- **OpenAI GPT** - Stable and reliable, best overall performance [Get API Key](https://platform.openai.com/api-keys)
- **Anthropic Claude** - Strong code understanding capabilities [Get API Key](https://console.anthropic.com/)
- **DeepSeek** - Cost-effective [Get API Key](https://platform.deepseek.com/)
**Chinese Platforms:**
- **Alibaba Qwen (通义千问)** [Get API Key](https://dashscope.console.aliyun.com/)
- **Zhipu AI (GLM)** [Get API Key](https://open.bigmodel.cn/)
- **Moonshot (Kimi)** [Get API Key](https://platform.moonshot.cn/)
- **Baidu ERNIE (文心一言)** [Get API Key](https://console.bce.baidu.com/qianfan/)
- **MiniMax** [Get API Key](https://www.minimaxi.com/)
- **Bytedance Doubao (豆包)** [Get API Key](https://console.volcengine.com/ark)
#### 📝 Configuration Examples
Configure your chosen platform in the `.env` file:
```env
# Method 1: Using Universal Configuration (Recommended)
VITE_LLM_PROVIDER=gemini # Choose provider
VITE_LLM_API_KEY=your_api_key # Corresponding API Key
VITE_LLM_MODEL=gemini-2.5-flash # Model name (optional)
# Method 2: Using Platform-Specific Configuration
VITE_GEMINI_API_KEY=your_gemini_api_key
VITE_OPENAI_API_KEY=your_openai_api_key
VITE_CLAUDE_API_KEY=your_claude_api_key
# ... Other platform configurations
```
**Quick Platform Switch:** Simply modify the value of `VITE_LLM_PROVIDER` to switch between different platforms!
> 💡 **Tip:** For detailed configuration instructions, please refer to the `.env.example` file
#### Supabase Configuration (Optional)
1. Visit [Supabase](https://supabase.com/) to create a new project
@ -187,7 +353,7 @@ For development or custom modifications, use local deployment.
<details>
<summary><b>🧠 Intelligent Auditing</b></summary>
- **AI Deep Code Understanding**: Based on Google Gemini (more mainstream platform APIs were planned), providing intelligent analysis beyond keyword matching.
- **AI Deep Code Understanding**: Supports multiple mainstream LLM platforms (Gemini, OpenAI, Claude, Qwen, DeepSeek, etc.), providing intelligent analysis beyond keyword matching.
- **Five Core Detection Dimensions**:
- 🐛 **Potential Bugs**: Precisely capture logical errors, boundary conditions, and null pointer issues.
- 🔒 **Security Vulnerabilities**: Identify SQL injection, XSS, sensitive information leakage, and other security risks.
@ -222,7 +388,7 @@ For development or custom modifications, use local deployment.
| **Data Visualization** | `Recharts` | Professional chart library supporting multiple chart types |
| **Routing** | `React Router v6` | Single-page application routing solution |
| **State Management** | `React Hooks` `Sonner` | Lightweight state management and notification system |
| **AI Engine** | `Google Gemini 2.5 Flash` (more mainstream platform APIs were planned) | Powerful large language model supporting code analysis |
| **AI Engine** | `Multi-Platform LLM` | Supports 10+ mainstream platforms including Gemini, OpenAI, Claude, Qwen, DeepSeek |
| **Backend Service** | `Supabase` `PostgreSQL` | Full-stack backend-as-a-service with real-time database |
| **HTTP Client** | `Axios` `Ky` | Modern HTTP request libraries |
| **Code Quality** | `Biome` `Ast-grep` `TypeScript` | Code formatting, static analysis, and type checking |
@ -308,17 +474,66 @@ pnpm lint
### Environment Variables
#### Core LLM Configuration
| Variable | Required | Default | Description |
|----------|----------|---------|-------------|
| `VITE_LLM_PROVIDER` | ✅ | `gemini` | LLM provider: `gemini`\|`openai`\|`claude`\|`qwen`\|`deepseek`\|`zhipu`\|`moonshot`\|`baidu`\|`minimax`\|`doubao` |
| `VITE_LLM_API_KEY` | ✅ | - | Universal API Key (higher priority than platform-specific config) |
| `VITE_LLM_MODEL` | ❌ | Auto | Model name (uses platform default if not specified) |
| `VITE_LLM_BASE_URL` | ❌ | - | Custom API endpoint (for proxy, relay, or private deployment) |
| `VITE_LLM_TIMEOUT` | ❌ | `150000` | Request timeout (milliseconds) |
| `VITE_LLM_TEMPERATURE` | ❌ | `0.2` | Temperature parameter (0.0-2.0), controls output randomness |
| `VITE_LLM_MAX_TOKENS` | ❌ | `4096` | Maximum output tokens |
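As a concrete example, if both keys are present the universal key takes precedence (matching the lookup order in `getCurrentLLMApiKey` later in this commit):
```env
VITE_LLM_PROVIDER=openai
VITE_OPENAI_API_KEY=platform_key   # ignored while VITE_LLM_API_KEY is set
VITE_LLM_API_KEY=universal_key     # takes precedence
```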
#### Platform-Specific API Key Configuration (Optional)
| Variable | Description | Special Requirements |
|----------|-------------|---------------------|
| `VITE_GEMINI_API_KEY` | Google Gemini API Key | - |
| `VITE_GEMINI_MODEL` | Gemini model (default: gemini-2.5-flash) | - |
| `VITE_OPENAI_API_KEY` | OpenAI API Key | - |
| `VITE_OPENAI_MODEL` | OpenAI model (default: gpt-4o-mini) | - |
| `VITE_OPENAI_BASE_URL` | OpenAI custom endpoint | For relay services |
| `VITE_CLAUDE_API_KEY` | Anthropic Claude API Key | - |
| `VITE_CLAUDE_MODEL` | Claude model (default: claude-3-5-sonnet-20241022) | - |
| `VITE_QWEN_API_KEY` | Alibaba Qwen API Key | - |
| `VITE_QWEN_MODEL` | Qwen model (default: qwen-turbo) | - |
| `VITE_DEEPSEEK_API_KEY` | DeepSeek API Key | - |
| `VITE_DEEPSEEK_MODEL` | DeepSeek model (default: deepseek-chat) | - |
| `VITE_ZHIPU_API_KEY` | Zhipu AI API Key | - |
| `VITE_ZHIPU_MODEL` | Zhipu model (default: glm-4-flash) | - |
| `VITE_MOONSHOT_API_KEY` | Moonshot Kimi API Key | - |
| `VITE_MOONSHOT_MODEL` | Kimi model (default: moonshot-v1-8k) | - |
| `VITE_BAIDU_API_KEY` | Baidu ERNIE API Key | ⚠️ Format: `API_KEY:SECRET_KEY` |
| `VITE_BAIDU_MODEL` | ERNIE model (default: ERNIE-3.5-8K) | - |
| `VITE_MINIMAX_API_KEY` | MiniMax API Key | - |
| `VITE_MINIMAX_MODEL` | MiniMax model (default: abab6.5-chat) | - |
| `VITE_DOUBAO_API_KEY` | Bytedance Doubao API Key | - |
| `VITE_DOUBAO_MODEL` | Doubao model (default: doubao-pro-32k) | - |
#### Database Configuration (Optional)
| Variable | Required | Description |
|----------|----------|-------------|
| `VITE_GEMINI_API_KEY` | ✅ | Google Gemini API key |
| `VITE_GEMINI_MODEL` | ❌ | AI model name (default: gemini-2.5-flash) |
| `VITE_GEMINI_TIMEOUT_MS` | ❌ | Request timeout (default: 25000ms) |
| `VITE_SUPABASE_URL` | ❌ | Supabase project URL |
| `VITE_SUPABASE_URL` | ❌ | Supabase project URL (for data persistence) |
| `VITE_SUPABASE_ANON_KEY` | ❌ | Supabase anonymous key |
| `VITE_APP_ID` | ❌ | Application identifier (default: xcodereviewer) |
| `VITE_MAX_ANALYZE_FILES` | ❌ | Maximum files to analyze (default: 40) |
| `VITE_LLM_CONCURRENCY` | ❌ | LLM concurrency limit (default: 2) |
| `VITE_LLM_GAP_MS` | ❌ | Gap between LLM requests (default: 500ms) |
> 💡 **Note**: Without Supabase configured, the system runs in demo mode and data is not persisted
#### GitHub Integration Configuration (Optional)
| Variable | Required | Description |
|----------|----------|-------------|
| `VITE_GITHUB_TOKEN` | ❌ | GitHub Personal Access Token (for repository analysis) |
#### Analysis Behavior Configuration
| Variable | Default | Description |
|----------|---------|-------------|
| `VITE_MAX_ANALYZE_FILES` | `40` | Maximum files per analysis |
| `VITE_LLM_CONCURRENCY` | `2` | LLM concurrent requests (reduce to avoid rate limiting) |
| `VITE_LLM_GAP_MS` | `500` | Gap between LLM requests (milliseconds, increase to avoid rate limiting) |
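To make these two knobs concrete, here is a minimal illustrative sketch of a pacer that honors a concurrency cap and an inter-request gap (the name `runPaced` and its shape are assumptions for illustration; the project's actual scheduler is not shown in this commit):
```typescript
// Illustrative only: run LLM tasks with a concurrency cap (VITE_LLM_CONCURRENCY)
// and a fixed gap between requests (VITE_LLM_GAP_MS).
async function runPaced<T>(
  tasks: Array<() => Promise<T>>,
  concurrency = 2, // VITE_LLM_CONCURRENCY
  gapMs = 500,     // VITE_LLM_GAP_MS
): Promise<T[]> {
  const results: T[] = new Array(tasks.length);
  let next = 0;
  const worker = async () => {
    while (next < tasks.length) {
      const i = next++;
      results[i] = await tasks[i]();
      // Pause between requests to stay under provider rate limits.
      await new Promise((r) => setTimeout(r, gapMs));
    }
  };
  await Promise.all(Array.from({ length: concurrency }, () => worker()));
  return results;
}
```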
#### Application Configuration
| Variable | Default | Description |
|----------|---------|-------------|
| `VITE_APP_ID` | `xcodereviewer` | Application identifier |
## 🤝 Contributing
@ -351,7 +566,8 @@ We warmly welcome all forms of contributions! Whether it's submitting issues, cr
Currently, XCodeReviewer is at the rapid prototype verification stage and its features still need to mature. Based on how the project evolves and on community suggestions, the future development plan is as follows (to be implemented as soon as possible):
- **Multi-platform/Local Model Support**: Planned API integrations for major mainstream models at home and abroad, such as OpenAI, Claude, and Qwen, plus support for calling local large models to meet data privacy requirements.
- **✅ Multi-Platform LLM Support**: Implemented API calling functionality for 10+ mainstream platforms (Gemini, OpenAI, Claude, Qwen, DeepSeek, Zhipu AI, Kimi, ERNIE, MiniMax, Doubao), with support for free configuration and switching
- **Local Model Support**: Planning to add support for local large models (such as Ollama) to meet data privacy requirements
- **Multi-Agent Collaboration**: Consider introducing a multi-agent collaboration architecture, which will implement the `Agent + Human Dialogue` feedback function, including multi-round dialogue process display, human dialogue interruption intervention, etc., to obtain a clearer, more transparent, and supervised auditing process, thereby improving audit quality.
- **Professional Report File Generation**: Generate professional audit report files in relevant formats according to different needs, supporting customization of file report formats, etc.
- **Custom Audit Standards**: Different teams have their own coding standards, and different projects have specific security requirements, which is exactly what we want to do next in this project. The current version is still in a "semi-black box mode", where the project guides the analysis direction and defines audit standards through Prompt engineering, and the actual analysis effect is determined by the built-in knowledge of powerful pre-trained AI models. In the future, we will combine methods such as reinforcement learning and supervised learning fine-tuning to develop support for custom rule configuration, define team-specific rules through YAML or JSON, provide best practice templates for common frameworks, etc., to obtain audit results that are more in line with requirements and standards.

View File

@ -1,5 +1,7 @@
import type { CodeAnalysisResult } from "@/types/types";
import { GoogleGenerativeAI } from "@google/generative-ai";
import { LLMService } from '@/shared/services/llm';
import { getCurrentLLMApiKey, getCurrentLLMModel, env } from '@/shared/config/env';
import type { LLMConfig } from '@/shared/services/llm/types';
// LLM-based code analysis engine
export class CodeAnalysisEngine {
@ -11,49 +13,30 @@ export class CodeAnalysisEngine {
return [...this.SUPPORTED_LANGUAGES];
}
static async analyzeCode(code: string, language: string): Promise<CodeAnalysisResult> {
const apiKey = import.meta.env.VITE_GEMINI_API_KEY as string | undefined;
/**
* Create the LLM service instance
*/
private static createLLMService(): LLMService {
const apiKey = getCurrentLLMApiKey();
if (!apiKey) {
throw new Error('Missing VITE_GEMINI_API_KEY environment variable; configure it in .env');
throw new Error(`Missing ${env.LLM_PROVIDER} API Key; configure it in .env`);
}
const genAI = new GoogleGenerativeAI(apiKey);
const primaryModel = (import.meta.env.VITE_GEMINI_MODEL as string) || 'gemini-2.5-flash';
const fallbacks = ['gemini-1.5-flash'];
const requestWithTimeout = async (m: string, promptText: string, timeoutMs: number) => {
const controller = new AbortController();
const timer = setTimeout(() => controller.abort(), timeoutMs);
try {
const mdl = genAI.getGenerativeModel({ model: m });
const res = await mdl.generateContent({
contents: [{ role: 'user', parts: [{ text: promptText }] }],
safetySettings: [],
generationConfig: { temperature: 0.2 }
}, { signal: controller.signal as any });
return res.response.text();
} finally {
clearTimeout(timer);
}
const config: LLMConfig = {
provider: env.LLM_PROVIDER as any,
apiKey,
model: getCurrentLLMModel(),
baseUrl: env.LLM_BASE_URL,
timeout: env.LLM_TIMEOUT,
temperature: env.LLM_TEMPERATURE,
maxTokens: env.LLM_MAX_TOKENS,
};
const generateWithRetry = async (promptText: string) => {
const models = [primaryModel, ...fallbacks];
const maxAttempts = 3;
const timeoutMs = Number(import.meta.env.VITE_GEMINI_TIMEOUT_MS || 25000);
let lastError: any = null;
for (const m of models) {
for (let i = 0; i < maxAttempts; i++) {
try {
return await requestWithTimeout(m, prompt, timeoutMs);
} catch (err: any) {
lastError = err;
await new Promise(r => setTimeout(r, 1000 * Math.pow(2, i))); // 1s,2s,4s
}
}
}
throw lastError;
};
return new LLMService(config);
}
static async analyzeCode(code: string, language: string): Promise<CodeAnalysisResult> {
const llmService = this.createLLMService();
const schema = `{
"issues": [
@ -91,32 +74,39 @@ export class CodeAnalysisEngine {
}
}`;
const prompt = [
`Respond strictly in Chinese. You are a professional code-audit assistant. Analyze the code across coding conventions, potential bugs, performance issues, security vulnerabilities, maintainability, and best practices, and output strict JSON (JSON only) matching the following schema:`,
schema,
`Language: ${language}`,
`Code: \n\n${code}`
].join('\n\n');
const systemPrompt = `Respond strictly in Chinese. You are a professional code-audit assistant. Analyze the code across coding conventions, potential bugs, performance issues, security vulnerabilities, maintainability, and best practices, and output strict JSON (JSON only) matching the following schema:\n\n${schema}`;
const userPrompt = `Language: ${language}\n\nCode:\n\n${code}`;
let text = '';
try {
text = await generateWithRetry(prompt);
} catch (e) {
// If all attempts time out or fail, return a fallback estimated result
const fallbackIssues: any[] = [];
const fallbackMetrics = this.estimateMetricsFromIssues(fallbackIssues);
return {
issues: fallbackIssues,
quality_score: this.calculateQualityScore(fallbackMetrics, fallbackIssues),
summary: {
total_issues: 0,
critical_issues: 0,
high_issues: 0,
medium_issues: 0,
low_issues: 0,
},
metrics: fallbackMetrics
} as CodeAnalysisResult;
// Run the analysis through the new LLM service
const response = await llmService.complete({
messages: [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: userPrompt },
],
temperature: 0.2,
});
text = response.content;
} catch (e: any) {
console.error('LLM analysis failed:', e);
// Build a friendlier error message
const errorMsg = e.message || 'unknown error';
const provider = env.LLM_PROVIDER;
// Surface a detailed error to the frontend
throw new Error(
`${provider} API call failed\n\n` +
`Error details: ${errorMsg}\n\n` +
`Configuration check:\n` +
`- Provider: ${provider}\n` +
`- Model: ${getCurrentLLMModel() || '(default)'}\n` +
`- API Key: ${getCurrentLLMApiKey() ? 'configured' : 'not configured'}\n` +
`- Timeout: ${env.LLM_TIMEOUT}ms\n\n` +
`Check your .env configuration or try switching to another LLM provider`
);
);
}
const parsed = this.safeParseJson(text);

View File

@ -1,38 +1,147 @@
// Environment variable configuration
export const env = {
// Gemini AI configuration
// ==================== Universal LLM configuration ====================
// Currently selected LLM provider (gemini|openai|claude|qwen|deepseek|zhipu|moonshot|baidu|minimax|doubao)
LLM_PROVIDER: import.meta.env.VITE_LLM_PROVIDER || 'gemini',
// LLM API Key
LLM_API_KEY: import.meta.env.VITE_LLM_API_KEY || '',
// LLM model name
LLM_MODEL: import.meta.env.VITE_LLM_MODEL || '',
// LLM API base URL (optional, for custom endpoints or proxies)
LLM_BASE_URL: import.meta.env.VITE_LLM_BASE_URL || '',
// LLM request timeout (ms)
LLM_TIMEOUT: Number(import.meta.env.VITE_LLM_TIMEOUT) || 150000,
// LLM temperature (0.0-2.0)
LLM_TEMPERATURE: Number(import.meta.env.VITE_LLM_TEMPERATURE) || 0.2,
// LLM maximum output tokens
LLM_MAX_TOKENS: Number(import.meta.env.VITE_LLM_MAX_TOKENS) || 4096,
// ==================== Gemini AI configuration (legacy compatibility) ====================
GEMINI_API_KEY: import.meta.env.VITE_GEMINI_API_KEY || '',
GEMINI_MODEL: import.meta.env.VITE_GEMINI_MODEL || 'gemini-2.5-flash',
GEMINI_TIMEOUT_MS: Number(import.meta.env.VITE_GEMINI_TIMEOUT_MS) || 25000,
// Supabase configuration
// ==================== OpenAI configuration ====================
OPENAI_API_KEY: import.meta.env.VITE_OPENAI_API_KEY || '',
OPENAI_MODEL: import.meta.env.VITE_OPENAI_MODEL || 'gpt-4o-mini',
OPENAI_BASE_URL: import.meta.env.VITE_OPENAI_BASE_URL || '',
// ==================== Claude configuration ====================
CLAUDE_API_KEY: import.meta.env.VITE_CLAUDE_API_KEY || '',
CLAUDE_MODEL: import.meta.env.VITE_CLAUDE_MODEL || 'claude-3-5-sonnet-20241022',
// ==================== Qwen configuration ====================
QWEN_API_KEY: import.meta.env.VITE_QWEN_API_KEY || '',
QWEN_MODEL: import.meta.env.VITE_QWEN_MODEL || 'qwen-turbo',
// ==================== DeepSeek configuration ====================
DEEPSEEK_API_KEY: import.meta.env.VITE_DEEPSEEK_API_KEY || '',
DEEPSEEK_MODEL: import.meta.env.VITE_DEEPSEEK_MODEL || 'deepseek-chat',
// ==================== Zhipu AI configuration ====================
ZHIPU_API_KEY: import.meta.env.VITE_ZHIPU_API_KEY || '',
ZHIPU_MODEL: import.meta.env.VITE_ZHIPU_MODEL || 'glm-4-flash',
// ==================== Moonshot configuration ====================
MOONSHOT_API_KEY: import.meta.env.VITE_MOONSHOT_API_KEY || '',
MOONSHOT_MODEL: import.meta.env.VITE_MOONSHOT_MODEL || 'moonshot-v1-8k',
// ==================== Baidu ERNIE configuration ====================
BAIDU_API_KEY: import.meta.env.VITE_BAIDU_API_KEY || '',
BAIDU_MODEL: import.meta.env.VITE_BAIDU_MODEL || 'ERNIE-3.5-8K',
// ==================== MiniMax configuration ====================
MINIMAX_API_KEY: import.meta.env.VITE_MINIMAX_API_KEY || '',
MINIMAX_MODEL: import.meta.env.VITE_MINIMAX_MODEL || 'abab6.5-chat',
// ==================== Doubao configuration ====================
DOUBAO_API_KEY: import.meta.env.VITE_DOUBAO_API_KEY || '',
DOUBAO_MODEL: import.meta.env.VITE_DOUBAO_MODEL || 'doubao-pro-32k',
// ==================== Supabase configuration ====================
SUPABASE_URL: import.meta.env.VITE_SUPABASE_URL || '',
SUPABASE_ANON_KEY: import.meta.env.VITE_SUPABASE_ANON_KEY || '',
// GitHub configuration
// ==================== GitHub configuration ====================
GITHUB_TOKEN: import.meta.env.VITE_GITHUB_TOKEN || '',
// Application configuration
// ==================== Application configuration ====================
APP_ID: import.meta.env.VITE_APP_ID || 'xcodereviewer',
// Analysis configuration
// ==================== Analysis configuration ====================
MAX_ANALYZE_FILES: Number(import.meta.env.VITE_MAX_ANALYZE_FILES) || 40,
LLM_CONCURRENCY: Number(import.meta.env.VITE_LLM_CONCURRENCY) || 2,
LLM_GAP_MS: Number(import.meta.env.VITE_LLM_GAP_MS) || 500,
// Development environment flags
// ==================== Development environment flags ====================
isDev: import.meta.env.DEV,
isProd: import.meta.env.PROD,
} as const;
/**
* Get the API Key for the current LLM provider
*/
export function getCurrentLLMApiKey(): string {
const provider = env.LLM_PROVIDER.toLowerCase();
// Prefer the universal configuration
if (env.LLM_API_KEY) {
return env.LLM_API_KEY;
}
// Otherwise look up the API Key for the provider
const providerKeyMap: Record<string, string> = {
gemini: env.GEMINI_API_KEY,
openai: env.OPENAI_API_KEY,
claude: env.CLAUDE_API_KEY,
qwen: env.QWEN_API_KEY,
deepseek: env.DEEPSEEK_API_KEY,
zhipu: env.ZHIPU_API_KEY,
moonshot: env.MOONSHOT_API_KEY,
baidu: env.BAIDU_API_KEY,
minimax: env.MINIMAX_API_KEY,
doubao: env.DOUBAO_API_KEY,
};
return providerKeyMap[provider] || '';
}
/**
* Get the model for the current LLM provider
*/
export function getCurrentLLMModel(): string {
const provider = env.LLM_PROVIDER.toLowerCase();
// Prefer the universal configuration
if (env.LLM_MODEL) {
return env.LLM_MODEL;
}
// Otherwise look up the model for the provider
const providerModelMap: Record<string, string> = {
gemini: env.GEMINI_MODEL,
openai: env.OPENAI_MODEL,
claude: env.CLAUDE_MODEL,
qwen: env.QWEN_MODEL,
deepseek: env.DEEPSEEK_MODEL,
zhipu: env.ZHIPU_MODEL,
moonshot: env.MOONSHOT_MODEL,
baidu: env.BAIDU_MODEL,
minimax: env.MINIMAX_MODEL,
doubao: env.DOUBAO_MODEL,
};
return providerModelMap[provider] || '';
}
// Validate required environment variables
export function validateEnv() {
const requiredVars = ['GEMINI_API_KEY'];
const missing = requiredVars.filter(key => !env[key as keyof typeof env]);
const apiKey = getCurrentLLMApiKey();
if (missing.length > 0) {
console.warn(`Missing required environment variables: ${missing.join(', ')}`);
if (!apiKey) {
console.warn(`No API Key configured for ${env.LLM_PROVIDER}; set it in your environment variables`);
return false;
}
return missing.length === 0;
return true;
}
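A minimal usage sketch of these helpers at an app entry point (a hypothetical call site; the import path is taken from the `CodeAnalysisEngine` diff above):
```typescript
// Hypothetical startup check built on the helpers defined above.
import { env, getCurrentLLMModel, validateEnv } from '@/shared/config/env';

if (validateEnv()) {
  console.log(`LLM ready: provider=${env.LLM_PROVIDER}, model=${getCurrentLLMModel() || '(default)'}`);
} else {
  console.warn('No LLM API Key configured; code analysis requests will fail.');
}
// CodeAnalysisEngine consumes getCurrentLLMApiKey() when building its LLMConfig.
```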

View File

@ -0,0 +1,123 @@
/**
* Baidu ERNIE (Wenxin Yiyan) adapter
*/
import { BaseLLMAdapter } from '../base-adapter';
import type { LLMRequest, LLMResponse } from '../types';
export class BaiduAdapter extends BaseLLMAdapter {
private baseUrl: string;
private accessToken?: string;
private tokenExpiry?: number;
constructor(config: any) {
super(config);
this.baseUrl = config.baseUrl || 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1';
}
async complete(request: LLMRequest): Promise<LLMResponse> {
try {
await this.validateConfig();
await this.ensureAccessToken();
return await this.retry(async () => {
return await this.withTimeout(this._sendRequest(request));
});
} catch (error) {
this.handleError(error, 'ERNIE API call failed');
}
}
private async ensureAccessToken(): Promise<void> {
// Return early if the token exists and has not expired
if (this.accessToken && this.tokenExpiry && Date.now() < this.tokenExpiry) {
return;
}
// The ERNIE API Key format is "API_KEY:SECRET_KEY"
const [apiKey, secretKey] = this.config.apiKey.split(':');
if (!apiKey || !secretKey) {
throw new Error('Invalid Baidu API Key format; expected "API_KEY:SECRET_KEY"');
}
const tokenUrl = `https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=${apiKey}&client_secret=${secretKey}`;
const response = await fetch(tokenUrl, { method: 'POST' });
if (!response.ok) {
throw new Error('Failed to obtain Baidu access_token');
}
const data = await response.json();
this.accessToken = data.access_token;
// Set the expiry to 29 days out (Baidu tokens are valid for 30 days)
this.tokenExpiry = Date.now() + 29 * 24 * 60 * 60 * 1000;
}
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
const endpoint = this.getModelEndpoint(this.config.model);
const url = `${this.baseUrl}/wenxinworkshop/chat/${endpoint}?access_token=${this.accessToken}`;
const response = await fetch(url, {
method: 'POST',
headers: this.buildHeaders(),
body: JSON.stringify({
messages: request.messages.map(msg => ({
role: msg.role,
content: msg.content,
})),
temperature: request.temperature ?? this.config.temperature,
top_p: request.topP ?? this.config.topP,
}),
});
if (!response.ok) {
const error = await response.json().catch(() => ({}));
throw {
statusCode: response.status,
message: error.error_msg || `HTTP ${response.status}: ${response.statusText}`,
};
}
const data = await response.json();
if (data.error_code) {
throw new Error(`API error (${data.error_code}): ${data.error_msg}`);
}
return {
content: data.result || '',
model: this.config.model,
usage: data.usage ? {
promptTokens: data.usage.prompt_tokens,
completionTokens: data.usage.completion_tokens,
totalTokens: data.usage.total_tokens,
} : undefined,
finishReason: 'stop',
};
}
private getModelEndpoint(model: string): string {
const endpoints: Record<string, string> = {
'ERNIE-4.0-8K': 'completions_pro',
'ERNIE-3.5-8K': 'completions',
'ERNIE-3.5-128K': 'ernie-3.5-128k',
'ERNIE-Speed-8K': 'ernie_speed',
'ERNIE-Speed-128K': 'ernie-speed-128k',
'ERNIE-Lite-8K': 'ernie-lite-8k',
'ERNIE-Tiny-8K': 'ernie-tiny-8k',
};
return endpoints[model] || 'completions';
}
async validateConfig(): Promise<boolean> {
await super.validateConfig();
if (!this.config.apiKey.includes(':')) {
throw new Error('Invalid Baidu API Key format; expected "API_KEY:SECRET_KEY"');
}
return true;
}
}

View File

@ -0,0 +1,95 @@
/**
* Anthropic Claude adapter
*/
import { BaseLLMAdapter } from '../base-adapter';
import type { LLMRequest, LLMResponse } from '../types';
export class ClaudeAdapter extends BaseLLMAdapter {
private baseUrl: string;
constructor(config: any) {
super(config);
this.baseUrl = config.baseUrl || 'https://api.anthropic.com/v1';
}
async complete(request: LLMRequest): Promise<LLMResponse> {
try {
await this.validateConfig();
return await this.retry(async () => {
return await this.withTimeout(this._sendRequest(request));
});
} catch (error) {
this.handleError(error, 'Claude API call failed');
}
}
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
// The Claude API requires the system message to be passed separately
const systemMessage = request.messages.find(msg => msg.role === 'system');
const messages = request.messages
.filter(msg => msg.role !== 'system')
.map(msg => ({
role: msg.role,
content: msg.content,
}));
const requestBody: any = {
model: this.config.model,
messages,
max_tokens: request.maxTokens ?? this.config.maxTokens ?? 4096,
temperature: request.temperature ?? this.config.temperature,
top_p: request.topP ?? this.config.topP,
};
if (systemMessage) {
requestBody.system = systemMessage.content;
}
const response = await fetch(`${this.baseUrl}/messages`, {
method: 'POST',
headers: this.buildHeaders({
'x-api-key': this.config.apiKey,
'anthropic-version': '2023-06-01',
}),
body: JSON.stringify(requestBody),
});
if (!response.ok) {
const error = await response.json().catch(() => ({}));
throw {
statusCode: response.status,
message: error.error?.message || `HTTP ${response.status}: ${response.statusText}`,
};
}
const data = await response.json();
if (!data.content || !data.content[0]) {
throw new Error('Unexpected API response format: missing content field');
}
return {
content: data.content[0].text || '',
model: data.model,
usage: data.usage ? {
promptTokens: data.usage.input_tokens,
completionTokens: data.usage.output_tokens,
totalTokens: data.usage.input_tokens + data.usage.output_tokens,
} : undefined,
finishReason: data.stop_reason,
};
}
async validateConfig(): Promise<boolean> {
await super.validateConfig();
if (!this.config.model.startsWith('claude-')) {
throw new Error(`Invalid Claude model: ${this.config.model}`);
}
return true;
}
}

View File

@ -0,0 +1,83 @@
/**
* DeepSeek adapter
*/
import { BaseLLMAdapter } from '../base-adapter';
import type { LLMRequest, LLMResponse } from '../types';
export class DeepSeekAdapter extends BaseLLMAdapter {
private baseUrl: string;
constructor(config: any) {
super(config);
this.baseUrl = config.baseUrl || 'https://api.deepseek.com';
}
async complete(request: LLMRequest): Promise<LLMResponse> {
try {
await this.validateConfig();
return await this.retry(async () => {
return await this.withTimeout(this._sendRequest(request));
});
} catch (error) {
this.handleError(error, 'DeepSeek API call failed');
}
}
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
// The DeepSeek API is OpenAI-compatible
const response = await fetch(`${this.baseUrl}/v1/chat/completions`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
}),
body: JSON.stringify({
model: this.config.model,
messages: request.messages,
temperature: request.temperature ?? this.config.temperature,
max_tokens: request.maxTokens ?? this.config.maxTokens,
top_p: request.topP ?? this.config.topP,
frequency_penalty: this.config.frequencyPenalty,
presence_penalty: this.config.presencePenalty,
}),
});
if (!response.ok) {
const error = await response.json().catch(() => ({}));
throw {
statusCode: response.status,
message: error.error?.message || `HTTP ${response.status}: ${response.statusText}`,
};
}
const data = await response.json();
const choice = data.choices?.[0];
if (!choice) {
throw new Error('Unexpected API response format: missing choices field');
}
return {
content: choice.message?.content || '',
model: data.model,
usage: data.usage ? {
promptTokens: data.usage.prompt_tokens,
completionTokens: data.usage.completion_tokens,
totalTokens: data.usage.total_tokens,
} : undefined,
finishReason: choice.finish_reason,
};
}
async validateConfig(): Promise<boolean> {
await super.validateConfig();
if (!this.config.model) {
throw new Error('No DeepSeek model specified');
}
return true;
}
}

View File

@ -0,0 +1,81 @@
/**
* ByteDance Doubao adapter
*/
import { BaseLLMAdapter } from '../base-adapter';
import type { LLMRequest, LLMResponse } from '../types';
export class DoubaoAdapter extends BaseLLMAdapter {
private baseUrl: string;
constructor(config: any) {
super(config);
this.baseUrl = config.baseUrl || 'https://ark.cn-beijing.volces.com/api/v3';
}
async complete(request: LLMRequest): Promise<LLMResponse> {
try {
await this.validateConfig();
return await this.retry(async () => {
return await this.withTimeout(this._sendRequest(request));
});
} catch (error) {
this.handleError(error, 'Doubao API call failed');
}
}
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
// The Doubao API is OpenAI-compatible
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
}),
body: JSON.stringify({
model: this.config.model,
messages: request.messages,
temperature: request.temperature ?? this.config.temperature,
max_tokens: request.maxTokens ?? this.config.maxTokens,
top_p: request.topP ?? this.config.topP,
}),
});
if (!response.ok) {
const error = await response.json().catch(() => ({}));
throw {
statusCode: response.status,
message: error.error?.message || `HTTP ${response.status}: ${response.statusText}`,
};
}
const data = await response.json();
const choice = data.choices?.[0];
if (!choice) {
throw new Error('Unexpected API response format: missing choices field');
}
return {
content: choice.message?.content || '',
model: data.model,
usage: data.usage ? {
promptTokens: data.usage.prompt_tokens,
completionTokens: data.usage.completion_tokens,
totalTokens: data.usage.total_tokens,
} : undefined,
finishReason: choice.finish_reason,
};
}
async validateConfig(): Promise<boolean> {
await super.validateConfig();
if (!this.config.model) {
throw new Error('No Doubao model specified');
}
return true;
}
}

View File

@ -0,0 +1,78 @@
/**
* Google Gemini adapter
*/
import { GoogleGenerativeAI } from '@google/generative-ai';
import { BaseLLMAdapter } from '../base-adapter';
import type { LLMRequest, LLMResponse } from '../types';
export class GeminiAdapter extends BaseLLMAdapter {
private client: GoogleGenerativeAI;
constructor(config: any) {
super(config);
this.client = new GoogleGenerativeAI(this.config.apiKey);
}
async complete(request: LLMRequest): Promise<LLMResponse> {
try {
await this.validateConfig();
return await this.retry(async () => {
return await this.withTimeout(this._generateContent(request));
});
} catch (error) {
this.handleError(error, 'Gemini API call failed');
}
}
private async _generateContent(request: LLMRequest): Promise<LLMResponse> {
const model = this.client.getGenerativeModel({
model: this.config.model,
generationConfig: {
temperature: request.temperature ?? this.config.temperature,
maxOutputTokens: request.maxTokens ?? this.config.maxTokens,
topP: request.topP ?? this.config.topP,
}
});
// Convert messages to the Gemini format
const contents = request.messages
.filter(msg => msg.role !== 'system')
.map(msg => ({
role: msg.role === 'assistant' ? 'model' : 'user',
parts: [{ text: msg.content }],
}));
// Prepend the system message to the first user message
const systemMessage = request.messages.find(msg => msg.role === 'system');
if (systemMessage && contents.length > 0) {
contents[0].parts[0].text = `${systemMessage.content}\n\n${contents[0].parts[0].text}`;
}
const result = await model.generateContent({
contents,
safetySettings: [],
});
const response = result.response;
const text = response.text();
return {
content: text,
model: this.config.model,
finishReason: 'stop',
};
}
async validateConfig(): Promise<boolean> {
await super.validateConfig();
if (!this.config.model.startsWith('gemini-')) {
throw new Error(`Invalid Gemini model: ${this.config.model}`);
}
return true;
}
}

View File

@ -0,0 +1,15 @@
/**
* LLM adapter exports
*/
export { GeminiAdapter } from './gemini-adapter';
export { OpenAIAdapter } from './openai-adapter';
export { ClaudeAdapter } from './claude-adapter';
export { QwenAdapter } from './qwen-adapter';
export { DeepSeekAdapter } from './deepseek-adapter';
export { ZhipuAdapter } from './zhipu-adapter';
export { MoonshotAdapter } from './moonshot-adapter';
export { BaiduAdapter } from './baidu-adapter';
export { MinimaxAdapter } from './minimax-adapter';
export { DoubaoAdapter } from './doubao-adapter';
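The commit does not show how `LLMService` picks one of these adapters; the following is a minimal sketch of the likely wiring, assuming a provider-keyed factory (the name `createAdapter` and the switch shape are illustrative, not the project's actual code):
```typescript
// Illustrative factory: map LLMConfig.provider onto one of the exported adapters.
import type { ILLMAdapter, LLMConfig } from '../types';
import {
  GeminiAdapter, OpenAIAdapter, ClaudeAdapter, QwenAdapter, DeepSeekAdapter,
  ZhipuAdapter, MoonshotAdapter, BaiduAdapter, MinimaxAdapter, DoubaoAdapter,
} from './index';

export function createAdapter(config: LLMConfig): ILLMAdapter {
  switch (config.provider) {
    case 'gemini':   return new GeminiAdapter(config);
    case 'openai':   return new OpenAIAdapter(config);
    case 'claude':   return new ClaudeAdapter(config);
    case 'qwen':     return new QwenAdapter(config);
    case 'deepseek': return new DeepSeekAdapter(config);
    case 'zhipu':    return new ZhipuAdapter(config);
    case 'moonshot': return new MoonshotAdapter(config);
    case 'baidu':    return new BaiduAdapter(config);
    case 'minimax':  return new MinimaxAdapter(config);
    case 'doubao':   return new DoubaoAdapter(config);
    default: throw new Error(`Unsupported LLM provider: ${config.provider}`);
  }
}
```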

View File

@ -0,0 +1,85 @@
/**
* MiniMax adapter
*/
import { BaseLLMAdapter } from '../base-adapter';
import type { LLMRequest, LLMResponse } from '../types';
export class MinimaxAdapter extends BaseLLMAdapter {
private baseUrl: string;
constructor(config: any) {
super(config);
this.baseUrl = config.baseUrl || 'https://api.minimax.chat/v1';
}
async complete(request: LLMRequest): Promise<LLMResponse> {
try {
await this.validateConfig();
return await this.retry(async () => {
return await this.withTimeout(this._sendRequest(request));
});
} catch (error) {
this.handleError(error, 'MiniMax API call failed');
}
}
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
// The MiniMax API is OpenAI-compatible
const response = await fetch(`${this.baseUrl}/text/chatcompletion_v2`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
}),
body: JSON.stringify({
model: this.config.model,
messages: request.messages,
temperature: request.temperature ?? this.config.temperature,
max_tokens: request.maxTokens ?? this.config.maxTokens,
top_p: request.topP ?? this.config.topP,
}),
});
if (!response.ok) {
const error = await response.json().catch(() => ({}));
throw {
statusCode: response.status,
message: error.base_resp?.status_msg || `HTTP ${response.status}: ${response.statusText}`,
};
}
const data = await response.json();
if (data.base_resp?.status_code !== 0) {
throw new Error(`API error (${data.base_resp?.status_code}): ${data.base_resp?.status_msg}`);
}
const choice = data.choices?.[0];
if (!choice) {
throw new Error('Unexpected API response format: missing choices field');
}
return {
content: choice.message?.content || '',
model: this.config.model,
usage: data.usage ? {
promptTokens: data.usage.total_tokens || 0,
completionTokens: 0,
totalTokens: data.usage.total_tokens || 0,
} : undefined,
finishReason: choice.finish_reason,
};
}
async validateConfig(): Promise<boolean> {
await super.validateConfig();
if (!this.config.model) {
throw new Error('No MiniMax model specified');
}
return true;
}
}

View File

@ -0,0 +1,81 @@
/**
* Moonshot (Kimi) adapter
*/
import { BaseLLMAdapter } from '../base-adapter';
import type { LLMRequest, LLMResponse } from '../types';
export class MoonshotAdapter extends BaseLLMAdapter {
private baseUrl: string;
constructor(config: any) {
super(config);
this.baseUrl = config.baseUrl || 'https://api.moonshot.cn/v1';
}
async complete(request: LLMRequest): Promise<LLMResponse> {
try {
await this.validateConfig();
return await this.retry(async () => {
return await this.withTimeout(this._sendRequest(request));
});
} catch (error) {
this.handleError(error, 'Moonshot API call failed');
}
}
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
// The Moonshot API is OpenAI-compatible
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
}),
body: JSON.stringify({
model: this.config.model,
messages: request.messages,
temperature: request.temperature ?? this.config.temperature,
max_tokens: request.maxTokens ?? this.config.maxTokens,
top_p: request.topP ?? this.config.topP,
}),
});
if (!response.ok) {
const error = await response.json().catch(() => ({}));
throw {
statusCode: response.status,
message: error.error?.message || `HTTP ${response.status}: ${response.statusText}`,
};
}
const data = await response.json();
const choice = data.choices?.[0];
if (!choice) {
throw new Error('Unexpected API response format: missing choices field');
}
return {
content: choice.message?.content || '',
model: data.model,
usage: data.usage ? {
promptTokens: data.usage.prompt_tokens,
completionTokens: data.usage.completion_tokens,
totalTokens: data.usage.total_tokens,
} : undefined,
finishReason: choice.finish_reason,
};
}
async validateConfig(): Promise<boolean> {
await super.validateConfig();
if (!this.config.model.startsWith('moonshot-')) {
throw new Error(`Invalid Moonshot model: ${this.config.model}`);
}
return true;
}
}

View File

@ -0,0 +1,82 @@
/**
* OpenAI adapter (GPT series)
*/
import { BaseLLMAdapter } from '../base-adapter';
import type { LLMRequest, LLMResponse } from '../types';
export class OpenAIAdapter extends BaseLLMAdapter {
private baseUrl: string;
constructor(config: any) {
super(config);
this.baseUrl = config.baseUrl || 'https://api.openai.com/v1';
}
async complete(request: LLMRequest): Promise<LLMResponse> {
try {
await this.validateConfig();
return await this.retry(async () => {
return await this.withTimeout(this._sendRequest(request));
});
} catch (error) {
this.handleError(error, 'OpenAI API call failed');
}
}
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
}),
body: JSON.stringify({
model: this.config.model,
messages: request.messages,
temperature: request.temperature ?? this.config.temperature,
max_tokens: request.maxTokens ?? this.config.maxTokens,
top_p: request.topP ?? this.config.topP,
frequency_penalty: this.config.frequencyPenalty,
presence_penalty: this.config.presencePenalty,
}),
});
if (!response.ok) {
const error = await response.json().catch(() => ({}));
throw {
statusCode: response.status,
message: error.error?.message || `HTTP ${response.status}: ${response.statusText}`,
};
}
const data = await response.json();
const choice = data.choices?.[0];
if (!choice) {
throw new Error('Unexpected API response format: missing choices field');
}
return {
content: choice.message?.content || '',
model: data.model,
usage: data.usage ? {
promptTokens: data.usage.prompt_tokens,
completionTokens: data.usage.completion_tokens,
totalTokens: data.usage.total_tokens,
} : undefined,
finishReason: choice.finish_reason,
};
}
async validateConfig(): Promise<boolean> {
await super.validateConfig();
if (!this.config.model) {
throw new Error('No OpenAI model specified');
}
return true;
}
}

View File

@ -0,0 +1,96 @@
/**
* Alibaba Cloud Qwen (Tongyi Qianwen) adapter
*/
import { BaseLLMAdapter } from '../base-adapter';
import type { LLMRequest, LLMResponse } from '../types';
export class QwenAdapter extends BaseLLMAdapter {
private baseUrl: string;
constructor(config: any) {
super(config);
this.baseUrl = config.baseUrl || 'https://dashscope.aliyuncs.com/api/v1';
}
async complete(request: LLMRequest): Promise<LLMResponse> {
try {
await this.validateConfig();
return await this.retry(async () => {
return await this.withTimeout(this._sendRequest(request));
});
} catch (error) {
this.handleError(error, 'Qwen API call failed');
}
}
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
const response = await fetch(`${this.baseUrl}/services/aigc/text-generation/generation`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
'X-DashScope-SSE': 'disable',
}),
body: JSON.stringify({
model: this.config.model,
input: {
messages: request.messages.map(msg => ({
role: msg.role,
content: msg.content,
})),
},
parameters: {
temperature: request.temperature ?? this.config.temperature,
max_tokens: request.maxTokens ?? this.config.maxTokens,
top_p: request.topP ?? this.config.topP,
result_format: 'message',
},
}),
});
if (!response.ok) {
const error = await response.json().catch(() => ({}));
throw {
statusCode: response.status,
message: error.message || `HTTP ${response.status}: ${response.statusText}`,
};
}
const data = await response.json();
if (data.code && data.code !== '200') {
throw new Error(`API error (${data.code}): ${data.message}`);
}
const output = data.output;
if (!output?.choices?.[0]) {
throw new Error('Unexpected API response format: missing output.choices field');
}
const choice = output.choices[0];
return {
content: choice.message?.content || '',
model: this.config.model,
usage: output.usage ? {
promptTokens: output.usage.input_tokens,
completionTokens: output.usage.output_tokens,
totalTokens: output.usage.total_tokens ||
(output.usage.input_tokens + output.usage.output_tokens),
} : undefined,
finishReason: choice.finish_reason,
};
}
async validateConfig(): Promise<boolean> {
await super.validateConfig();
if (!this.config.model) {
throw new Error('No Qwen model specified');
}
return true;
}
}

View File

@ -0,0 +1,81 @@
/**
 * Zhipu AI (GLM series) adapter
 */
import { BaseLLMAdapter } from '../base-adapter';
import type { LLMConfig, LLMRequest, LLMResponse } from '../types';
export class ZhipuAdapter extends BaseLLMAdapter {
private baseUrl: string;
constructor(config: LLMConfig) {
super(config);
this.baseUrl = config.baseUrl || 'https://open.bigmodel.cn/api/paas/v4';
}
async complete(request: LLMRequest): Promise<LLMResponse> {
try {
await this.validateConfig();
return await this.retry(async () => {
return await this.withTimeout(this._sendRequest(request));
});
} catch (error) {
this.handleError(error, 'Zhipu AI API call failed');
}
}
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
// The Zhipu AI API is OpenAI-compatible
const response = await fetch(`${this.baseUrl}/chat/completions`, {
method: 'POST',
headers: this.buildHeaders({
'Authorization': `Bearer ${this.config.apiKey}`,
}),
body: JSON.stringify({
model: this.config.model,
messages: request.messages,
temperature: request.temperature ?? this.config.temperature,
max_tokens: request.maxTokens ?? this.config.maxTokens,
top_p: request.topP ?? this.config.topP,
}),
});
if (!response.ok) {
const error = await response.json().catch(() => ({}));
throw {
statusCode: response.status,
message: error.error?.message || `HTTP ${response.status}: ${response.statusText}`,
};
}
const data = await response.json();
const choice = data.choices?.[0];
if (!choice) {
throw new Error('Unexpected API response: missing choices field');
}
return {
content: choice.message?.content || '',
model: data.model,
usage: data.usage ? {
promptTokens: data.usage.prompt_tokens,
completionTokens: data.usage.completion_tokens,
totalTokens: data.usage.total_tokens,
} : undefined,
finishReason: choice.finish_reason,
};
}
async validateConfig(): Promise<boolean> {
await super.validateConfig();
if (!this.config.model || !this.config.model.startsWith('glm-')) {
throw new Error(`Invalid Zhipu AI model: ${this.config.model}`);
}
return true;
}
}

View File

@ -0,0 +1,137 @@
/**
 * Base class for LLM adapters
 */
import type { ILLMAdapter, LLMConfig, LLMRequest, LLMResponse, LLMProvider } from './types';
import { LLMError, DEFAULT_LLM_CONFIG } from './types';
export abstract class BaseLLMAdapter implements ILLMAdapter {
protected config: LLMConfig;
constructor(config: LLMConfig) {
this.config = {
...DEFAULT_LLM_CONFIG,
...config,
};
}
abstract complete(request: LLMRequest): Promise<LLMResponse>;
getProvider(): LLMProvider {
return this.config.provider;
}
getModel(): string {
return this.config.model;
}
async validateConfig(): Promise<boolean> {
if (!this.config.apiKey) {
throw new LLMError(
'API key not configured',
this.config.provider
);
}
return true;
}
/**
 * Wrap a promise with a timeout
 */
protected async withTimeout<T>(
promise: Promise<T>,
timeoutMs: number = this.config.timeout || 150000
): Promise<T> {
return Promise.race([
promise,
new Promise<T>((_, reject) =>
setTimeout(() => reject(new LLMError(
`Request timed out (${timeoutMs}ms)`,
this.config.provider
)), timeoutMs)
),
]);
}
/**
 * Handle API errors: enrich the message and rethrow as LLMError
 */
protected handleError(error: any, context?: string): never {
let message = error.message || error;
// Provide more actionable messages for common error types
if (error.name === 'AbortError' || message.includes('timed out')) {
message = `Request timed out (${this.config.timeout}ms). Suggestions:\n` +
`1. Check that the network connection is working\n` +
`2. Increase the timeout (set VITE_LLM_TIMEOUT in .env)\n` +
`3. Verify that the API endpoint is correct`;
} else if (error.statusCode === 401 || error.statusCode === 403) {
message = `API authentication failed. Suggestions:\n` +
`1. Check that the API key is configured correctly\n` +
`2. Confirm the API key is valid and has not expired\n` +
`3. Verify the API key has sufficient permissions`;
} else if (error.statusCode === 429) {
message = `API rate limit exceeded. Suggestions:\n` +
`1. Wait a moment and then retry\n` +
`2. Lower the concurrency (VITE_LLM_CONCURRENCY)\n` +
`3. Increase the gap between requests (VITE_LLM_GAP_MS)`;
} else if (error.statusCode >= 500) {
message = `API service error (${error.statusCode}). Suggestions:\n` +
`1. Retry later\n` +
`2. Check the provider's status page\n` +
`3. Try switching to another LLM provider`;
}
}
const fullMessage = context ? `${context}: ${message}` : message;
throw new LLMError(
fullMessage,
this.config.provider,
error.statusCode || error.status,
error
);
}
/**
 * Retry with exponential backoff
 */
protected async retry<T>(
fn: () => Promise<T>,
maxAttempts: number = 3,
delay: number = 1000
): Promise<T> {
let lastError: any;
for (let attempt = 0; attempt < maxAttempts; attempt++) {
try {
return await fn();
} catch (error: any) {
lastError = error;
// Do not retry 4xx (client) errors
if (error.statusCode >= 400 && error.statusCode < 500) {
throw error;
}
// No wait needed after the final attempt
if (attempt < maxAttempts - 1) {
// Exponential backoff: delay, 2*delay, 4*delay, ...
await new Promise(resolve => setTimeout(resolve, delay * Math.pow(2, attempt)));
}
}
}
throw lastError;
}
/**
 * Build request headers
 */
protected buildHeaders(additionalHeaders: Record<string, string> = {}): Record<string, string> {
return {
'Content-Type': 'application/json',
...additionalHeaders,
};
}
}
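
Because validateConfig, retry, withTimeout, handleError, and buildHeaders all live in this base class, adding a new OpenAI-compatible provider reduces to a small subclass. A minimal sketch with a placeholder provider name and endpoint:

import { BaseLLMAdapter } from './base-adapter';
import type { LLMRequest, LLMResponse } from './types';

// Sketch of a hypothetical OpenAI-compatible provider
export class ExampleAdapter extends BaseLLMAdapter {
async complete(request: LLMRequest): Promise<LLMResponse> {
try {
await this.validateConfig();
// Same validate -> retry -> timeout pipeline the real adapters use
return await this.retry(() => this.withTimeout(this._sendRequest(request)));
} catch (error) {
this.handleError(error, 'Example API call failed');
}
}
private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
const response = await fetch('https://api.example.com/v1/chat/completions', { // placeholder URL
method: 'POST',
headers: this.buildHeaders({ 'Authorization': `Bearer ${this.config.apiKey}` }),
body: JSON.stringify({ model: this.config.model, messages: request.messages }),
});
if (!response.ok) {
throw { statusCode: response.status, message: response.statusText };
}
const data = await response.json();
return { content: data.choices?.[0]?.message?.content ?? '' };
}
}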

View File

@ -0,0 +1,25 @@
/**
 * Unified exports for the LLM service
 */
// Type definitions
export type {
LLMProvider,
LLMConfig,
LLMMessage,
LLMRequest,
LLMResponse,
ILLMAdapter,
} from './types';
// Utilities and constants
export { LLMError, DEFAULT_LLM_CONFIG, DEFAULT_MODELS, DEFAULT_BASE_URLS } from './types';
export { BaseLLMAdapter } from './base-adapter';
// Adapters
export * from './adapters';
// Factory and service
export { LLMFactory } from './llm-factory';
export { LLMService, createLLMService, getDefaultLLMService } from './llm-service';

View File

@ -0,0 +1,221 @@
/**
 * LLM factory - creates and caches LLM adapter instances
 */
import type { ILLMAdapter, LLMConfig, LLMProvider } from './types';
import { DEFAULT_MODELS } from './types';
import {
GeminiAdapter,
OpenAIAdapter,
ClaudeAdapter,
QwenAdapter,
DeepSeekAdapter,
ZhipuAdapter,
MoonshotAdapter,
BaiduAdapter,
MinimaxAdapter,
DoubaoAdapter,
} from './adapters';
/**
 * LLM factory class
 */
export class LLMFactory {
private static adapters: Map<string, ILLMAdapter> = new Map();
/**
 * Create (or reuse) an LLM adapter instance
 */
static createAdapter(config: LLMConfig): ILLMAdapter {
const cacheKey = this.getCacheKey(config);
// Serve from the cache when possible
if (this.adapters.has(cacheKey)) {
return this.adapters.get(cacheKey)!;
}
// Create a new adapter instance
const adapter = this.instantiateAdapter(config);
// Cache it for reuse
this.adapters.set(cacheKey, adapter);
return adapter;
}
/**
 * Instantiate the adapter for the configured provider
 */
private static instantiateAdapter(config: LLMConfig): ILLMAdapter {
// Fall back to the provider's default model when none is specified
if (!config.model) {
config.model = DEFAULT_MODELS[config.provider];
}
switch (config.provider) {
case 'gemini':
return new GeminiAdapter(config);
case 'openai':
return new OpenAIAdapter(config);
case 'claude':
return new ClaudeAdapter(config);
case 'qwen':
return new QwenAdapter(config);
case 'deepseek':
return new DeepSeekAdapter(config);
case 'zhipu':
return new ZhipuAdapter(config);
case 'moonshot':
return new MoonshotAdapter(config);
case 'baidu':
return new BaiduAdapter(config);
case 'minimax':
return new MinimaxAdapter(config);
case 'doubao':
return new DoubaoAdapter(config);
default:
throw new Error(`Unsupported LLM provider: ${config.provider}`);
}
}
/**
 * Build a cache key for an adapter configuration
 */
private static getCacheKey(config: LLMConfig): string {
return `${config.provider}:${config.model}:${config.apiKey.substring(0, 8)}`;
}
/**
 * Clear the adapter cache
 */
static clearCache(): void {
this.adapters.clear();
}
/**
 * List all supported providers
 */
static getSupportedProviders(): LLMProvider[] {
return [
'gemini',
'openai',
'claude',
'qwen',
'deepseek',
'zhipu',
'moonshot',
'baidu',
'minimax',
'doubao',
];
}
/**
 * Get the default model for a provider
 */
static getDefaultModel(provider: LLMProvider): string {
return DEFAULT_MODELS[provider];
}
/**
 * Get the known models for a provider
 */
static getAvailableModels(provider: LLMProvider): string[] {
const models: Record<LLMProvider, string[]> = {
gemini: [
'gemini-2.5-flash',
'gemini-2.5-pro',
'gemini-1.5-flash',
'gemini-1.5-pro',
],
openai: [
'gpt-4o',
'gpt-4o-mini',
'gpt-4-turbo',
'gpt-4',
'gpt-3.5-turbo',
],
claude: [
'claude-3-5-sonnet-20241022',
'claude-3-5-haiku-20241022',
'claude-3-opus-20240229',
'claude-3-sonnet-20240229',
'claude-3-haiku-20240307',
],
qwen: [
'qwen-turbo',
'qwen-plus',
'qwen-max',
'qwen-max-longcontext',
],
deepseek: [
'deepseek-chat',
'deepseek-coder',
],
zhipu: [
'glm-4-flash',
'glm-4',
'glm-4-air',
'glm-3-turbo',
],
moonshot: [
'moonshot-v1-8k',
'moonshot-v1-32k',
'moonshot-v1-128k',
],
baidu: [
'ERNIE-4.0-8K',
'ERNIE-3.5-8K',
'ERNIE-3.5-128K',
'ERNIE-Speed-8K',
'ERNIE-Speed-128K',
'ERNIE-Lite-8K',
'ERNIE-Tiny-8K',
],
minimax: [
'abab6.5-chat',
'abab6.5s-chat',
'abab5.5-chat',
],
doubao: [
'doubao-pro-32k',
'doubao-pro-128k',
'doubao-lite-32k',
'doubao-lite-128k',
],
};
return models[provider] || [];
}
/**
 * Get a human-readable display name for a provider
 */
static getProviderDisplayName(provider: LLMProvider): string {
const names: Record<LLMProvider, string> = {
gemini: 'Google Gemini',
openai: 'OpenAI GPT',
claude: 'Anthropic Claude',
qwen: 'Alibaba Cloud Tongyi Qianwen',
deepseek: 'DeepSeek',
zhipu: 'Zhipu AI (GLM)',
moonshot: 'Moonshot AI Kimi',
baidu: 'Baidu ERNIE Bot',
minimax: 'MiniMax',
doubao: 'ByteDance Doubao',
};
return names[provider] || provider;
}
}
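
A short sketch of the factory in use (the key and values below are placeholders). Because the cache key is provider:model:key-prefix, repeated calls with an identical config return the same instance:

import { LLMFactory } from './llm-factory';

const config = {
provider: 'deepseek' as const,
apiKey: 'sk-placeholder',
model: 'deepseek-chat',
};
const a = LLMFactory.createAdapter(config);
const b = LLMFactory.createAdapter(config);
console.log(a === b); // true - the second call is served from the cache
console.log(LLMFactory.getAvailableModels('deepseek')); // ['deepseek-chat', 'deepseek-coder']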

View File

@ -0,0 +1,102 @@
/**
 * LLM service - unified entry point for LLM calls
 */
import type { ILLMAdapter, LLMConfig, LLMMessage, LLMProvider, LLMRequest, LLMResponse } from './types';
import { LLMFactory } from './llm-factory';
import { env } from '@/shared/config/env';
/**
 * LLM service class
 */
export class LLMService {
private adapter: ILLMAdapter;
constructor(config: LLMConfig) {
this.adapter = LLMFactory.createAdapter(config);
}
/**
 * Execute a completion request
 */
async complete(request: LLMRequest): Promise<LLMResponse> {
return await this.adapter.complete(request);
}
/**
 * Convenience helper: send a single prompt and return the text content
 */
async simpleComplete(prompt: string, systemPrompt?: string): Promise<string> {
const messages: LLMMessage[] = [];
if (systemPrompt) {
messages.push({ role: 'system', content: systemPrompt });
}
messages.push({ role: 'user', content: prompt });
const response = await this.adapter.complete({ messages });
return response.content;
}
/**
 * Validate the underlying adapter configuration
 */
async validateConfig(): Promise<boolean> {
return await this.adapter.validateConfig();
}
/**
 * Get the current provider
 */
getProvider() {
return this.adapter.getProvider();
}
/**
 * Get the current model
 */
getModel() {
return this.adapter.getModel();
}
/**
 * Create a service from environment variables
 */
static createFromEnv(): LLMService {
const provider = (env.LLM_PROVIDER as LLMProvider) || 'gemini';
const apiKey = env.LLM_API_KEY || env.GEMINI_API_KEY;
const model = env.LLM_MODEL || env.GEMINI_MODEL;
if (!apiKey) {
throw new Error('LLM API key not configured; set it in the environment variables');
}
const config: LLMConfig = {
provider,
apiKey,
model,
baseUrl: env.LLM_BASE_URL,
timeout: env.LLM_TIMEOUT || env.GEMINI_TIMEOUT_MS,
temperature: env.LLM_TEMPERATURE,
maxTokens: env.LLM_MAX_TOKENS,
};
return new LLMService(config);
}
}
/**
 * Convenience function for creating an LLMService instance
 */
export function createLLMService(config: LLMConfig): LLMService {
return new LLMService(config);
}
/**
 * Get the default LLMService instance built from the environment
 */
export function getDefaultLLMService(): LLMService {
return LLMService.createFromEnv();
}
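
For callers, the service class is the whole surface. A usage sketch, assuming the environment variables from .env.example are set and that the module resolves under the @/shared alias used above (the exact path is an assumption):

import { getDefaultLLMService } from '@/shared/services/llm'; // assumed path

const llm = getDefaultLLMService();
const answer = await llm.simpleComplete(
'Review this function for bugs: function add(a, b) { return a - b; }',
'You are a strict code reviewer.' // optional system prompt
);
console.log(`[${llm.getProvider()}/${llm.getModel()}]`, answer);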

View File

@ -0,0 +1,135 @@
/**
 * Type definitions for the LLM service
 */
// Supported LLM providers
export type LLMProvider =
| 'gemini' // Google Gemini
| 'openai' // OpenAI (GPT series)
| 'claude' // Anthropic Claude
| 'qwen' // Alibaba Cloud Tongyi Qianwen
| 'deepseek' // DeepSeek
| 'zhipu' // Zhipu AI (GLM series)
| 'moonshot' // Moonshot AI Kimi
| 'baidu' // Baidu ERNIE Bot
| 'minimax' // MiniMax
| 'doubao'; // ByteDance Doubao
// LLM configuration interface
export interface LLMConfig {
provider: LLMProvider;
apiKey: string;
model: string;
baseUrl?: string; // Custom API endpoint
timeout?: number; // Timeout in milliseconds
temperature?: number; // Sampling temperature
maxTokens?: number; // Maximum number of tokens
topP?: number; // Top-p sampling
frequencyPenalty?: number; // Frequency penalty
presencePenalty?: number; // Presence penalty
}
// LLM request message
export interface LLMMessage {
role: 'system' | 'user' | 'assistant';
content: string;
}
// LLM request parameters
export interface LLMRequest {
messages: LLMMessage[];
temperature?: number;
maxTokens?: number;
topP?: number;
stream?: boolean;
}
// LLM response
export interface LLMResponse {
content: string;
model?: string;
usage?: {
promptTokens: number;
completionTokens: number;
totalTokens: number;
};
finishReason?: string;
}
// LLM adapter interface
export interface ILLMAdapter {
/**
 * Execute a completion request
 */
complete(request: LLMRequest): Promise<LLMResponse>;
/**
 * Optional streaming completion
 */
streamComplete?(request: LLMRequest): AsyncGenerator<string, void, unknown>;
/**
 * Get the provider identifier
 */
getProvider(): LLMProvider;
/**
 * Get the model name
 */
getModel(): string;
/**
 * Validate the adapter configuration
 */
validateConfig(): Promise<boolean>;
}
// Error type
export class LLMError extends Error {
constructor(
message: string,
public provider: LLMProvider,
public statusCode?: number,
public originalError?: any
) {
super(message);
this.name = 'LLMError';
}
}
// Default configuration
export const DEFAULT_LLM_CONFIG: Partial<LLMConfig> = {
timeout: 150000,
temperature: 0.2,
maxTokens: 4096,
topP: 1.0,
frequencyPenalty: 0,
presencePenalty: 0,
};
// Default model per provider
export const DEFAULT_MODELS: Record<LLMProvider, string> = {
gemini: 'gemini-2.5-flash',
openai: 'gpt-4o-mini',
claude: 'claude-3-5-sonnet-20241022',
qwen: 'qwen-turbo',
deepseek: 'deepseek-chat',
zhipu: 'glm-4-flash',
moonshot: 'moonshot-v1-8k',
baidu: 'ERNIE-3.5-8K',
minimax: 'abab6.5-chat',
doubao: 'doubao-pro-32k',
};
// Default API endpoint per provider
export const DEFAULT_BASE_URLS: Partial<Record<LLMProvider, string>> = {
openai: 'https://api.openai.com/v1',
qwen: 'https://dashscope.aliyuncs.com/api/v1',
deepseek: 'https://api.deepseek.com',
zhipu: 'https://open.bigmodel.cn/api/paas/v4',
moonshot: 'https://api.moonshot.cn/v1',
baidu: 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1',
minimax: 'https://api.minimax.chat/v1',
doubao: 'https://ark.cn-beijing.volces.com/api/v3',
};
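
Tying the types together: a valid LLMConfig needs only provider, apiKey, and model. BaseLLMAdapter merges DEFAULT_LLM_CONFIG over the rest, and each adapter falls back to a built-in endpoint when baseUrl is omitted. A sketch with a placeholder key:

import { DEFAULT_MODELS } from './types';
import type { LLMConfig } from './types';

const config: LLMConfig = {
provider: 'zhipu',
apiKey: 'placeholder-key',
model: DEFAULT_MODELS.zhipu, // 'glm-4-flash'
// timeout, temperature, maxTokens, topP and the penalties
// are filled in from DEFAULT_LLM_CONFIG by BaseLLMAdapter
};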