diff --git a/.env.example b/.env.example
index 42be5af..e3f410c 100644
--- a/.env.example
+++ b/.env.example
@@ -4,22 +4,22 @@
# Copy this file to .env and fill in your configuration
# ==================== General LLM Configuration ====================
-# Choose the LLM provider to use (gemini|openai|claude|qwen|deepseek|zhipu|moonshot|baidu|minimax|doubao)
+# Choose the LLM provider to use (gemini|openai|claude|qwen|deepseek|zhipu|moonshot|baidu|minimax|doubao|ollama)
VITE_LLM_PROVIDER=gemini
# Generic LLM settings (optional; if set, they override the provider-specific settings below)
# VITE_LLM_API_KEY=your_api_key_here
# VITE_LLM_MODEL=your_model_name
# VITE_LLM_BASE_URL=https://custom-api-endpoint.com
-# VITE_LLM_TIMEOUT=30000
+# VITE_LLM_TIMEOUT=150000
# VITE_LLM_TEMPERATURE=0.2
# VITE_LLM_MAX_TOKENS=4096
# ==================== Google Gemini Configuration ====================
# Get an API Key: https://makersuite.google.com/app/apikey
-VITE_GEMINI_API_KEY=your_gemini_api_key_here
-VITE_GEMINI_MODEL=gemini-2.5-flash
-VITE_GEMINI_TIMEOUT_MS=25000
+# VITE_GEMINI_API_KEY=your_gemini_api_key_here
+# VITE_GEMINI_MODEL=gemini-2.5-flash
+# VITE_GEMINI_TIMEOUT_MS=150000
# ==================== OpenAI Configuration ====================
# Get an API Key: https://platform.openai.com/api-keys
@@ -67,22 +67,27 @@ VITE_GEMINI_TIMEOUT_MS=25000
# Get an API Key: https://console.volcengine.com/ark
# Note: Doubao uses an endpoint ID; create an inference endpoint first
# VITE_DOUBAO_API_KEY=your_doubao_api_key_here
-# VITE_DOUBAO_MODEL=ep-xxxxx-xxxxx # Fill in your endpoint ID
-# ==================== ByteDance Doubao Configuration ====================
-# Get an API Key: https://console.volcengine.com/ark
-# Note: Doubao uses an endpoint ID; create an inference endpoint first
-# VITE_DOUBAO_API_KEY=your_doubao_api_key_here
-# VITE_DOUBAO_MODEL=ep-xxxxx-xxxxx # Fill in your endpoint ID
-# ==================== ByteDance Doubao Configuration ====================
-# Get an API Key: https://console.volcengine.com/ark
-# Note: Doubao uses an endpoint ID; create an inference endpoint first
-# VITE_DOUBAO_API_KEY=your_doubao_api_key_here
-# VITE_DOUBAO_MODEL=ep-xxxxx-xxxxx # Fill in your endpoint ID
-# ==================== ByteDance Doubao Configuration ====================
-# Get an API Key: https://console.volcengine.com/ark
-# Note: Doubao uses an endpoint ID; create an inference endpoint first
-# VITE_DOUBAO_API_KEY=your_doubao_api_key_here
-# VITE_DOUBAO_MODEL=ep-xxxxx-xxxxx # Fill in your endpoint ID
+# VITE_DOUBAO_MODEL=doubao-pro-32k
+
+# ==================== Ollama Local Model Configuration ====================
+# Ollama runs open-source models locally; no API Key required
+# Install: https://ollama.com/
+# Quick start:
+# 1. Install Ollama: curl -fsSL https://ollama.com/install.sh | sh
+# 2. Pull a model: ollama pull llama3
+# 3. Configure as below and start the app
+# VITE_OLLAMA_API_KEY=ollama # No real key is needed locally; any value works
+# VITE_OLLAMA_MODEL=llama3
+# VITE_OLLAMA_BASE_URL=http://localhost:11434/v1
+#
+# Recommended models:
+# - llama3 (strong all-round model, good for most tasks)
+# - codellama (code-focused, good for code review)
+# - qwen2.5:7b (strong Chinese-language support)
+# - deepseek-coder (strong code understanding)
+# - phi3:mini (lightweight and fast)
+#
+# More models: https://ollama.com/library
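+#
+# Connectivity check (a quick sanity test; assumes the default port 11434):
+#   curl http://localhost:11434/v1/models
+# If Ollama is running, this returns the locally installed models as JSON.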
# ==================== Supabase Database Configuration (optional) ====================
# If not configured, the system runs in demo mode and data is not persisted
diff --git a/README.md b/README.md
index a2585b0..cbc2b92 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,7 @@
- **AI-Driven Deep Analysis**: Goes beyond traditional static analysis to understand code intent and uncover deep logical issues.
- **Multi-Dimensional, Comprehensive Assessment**: From **security** and **performance** to **maintainability** and **code style**, a 360-degree quality evaluation with no blind spots.
- **Clear, Actionable Fix Suggestions**: An original **What-Why-How** approach that not only tells you "what" the problem is, but also explains "why" and shows "how" to fix it with concrete code examples.
-- **Multi-Platform LLM Support**: API calling implemented for 10+ mainstream platforms (Gemini, OpenAI, Claude, Qwen, DeepSeek, Zhipu AI, Kimi, ERNIE, MiniMax, Doubao), freely configurable and switchable.
+- **Multi-Platform LLM / Local LLM Support**: API calling implemented for 10+ mainstream platforms (Gemini, OpenAI, Claude, Qwen, DeepSeek, Zhipu AI, Kimi, ERNIE, MiniMax, Doubao, plus Ollama local models), freely configurable and switchable.
- **Modern, Polished User Interface**: Built with React + TypeScript for a smooth, intuitive experience.
## 🎬 Project Demo
@@ -273,6 +273,46 @@ VITE_DEEPSEEK_API_KEY=deepseek_key
```
+
+Q: How do I use Ollama local models?
+
+Ollama lets you run open-source models locally without an API Key, keeping your data private:
+
+**1. Install Ollama**
+```bash
+# macOS / Linux
+curl -fsSL https://ollama.com/install.sh | sh
+
+# Windows
+# Download and install: https://ollama.com/download
+```
+
+**2. Pull and run a model**
+```bash
+# Pull the Llama3 model
+ollama pull llama3
+
+# Verify the model is available
+ollama list
+```
+
+**3. Configure XCodeReviewer**
+```env
+VITE_LLM_PROVIDER=ollama
+VITE_LLM_API_KEY=ollama  # Any value works
+VITE_LLM_MODEL=llama3  # Model name to use
+VITE_LLM_BASE_URL=http://localhost:11434/v1  # Ollama API address
+```
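+
+**4. Verify connectivity (optional)**
+
+A quick sanity check against Ollama's OpenAI-compatible endpoint (this assumes the default port 11434):
+```bash
+# Should return the locally installed models as JSON
+curl http://localhost:11434/v1/models
+```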
+
+**Recommended models:**
+- `llama3` - Meta's open-source model, with excellent performance
+- `codellama` - Model optimized specifically for code
+- `qwen2.5` - Open-source version of Alibaba's Qwen
+- `deepseek-coder` - DeepSeek's code-specialized model
+
+More models at: https://ollama.com/library
+
+
### 🔑 Getting an API Key
#### 🎯 Supported LLM Platforms
@@ -293,6 +333,9 @@ XCodeReviewer 现已支持多个主流 LLM 平台,您可以根据需求自由
- **MiniMax** [Get API Key](https://www.minimaxi.com/)
- **ByteDance Doubao** [Get API Key](https://console.volcengine.com/ark)
+**Local deployment:**
+- **Ollama** - Run open-source models locally; supports Llama3, Mistral, CodeLlama, and more [Installation Guide](https://ollama.com/)
+
#### 📝 Configuration Examples
Configure your chosen platform in the `.env` file:
@@ -308,6 +351,12 @@ VITE_GEMINI_API_KEY=your_gemini_api_key
VITE_OPENAI_API_KEY=your_openai_api_key
VITE_CLAUDE_API_KEY=your_claude_api_key
# ... other platform settings
+
+# Use an Ollama local model (no API Key required)
+VITE_LLM_PROVIDER=ollama
+VITE_LLM_API_KEY=ollama  # Any value works
+VITE_LLM_MODEL=llama3  # Model name to use
+VITE_LLM_BASE_URL=http://localhost:11434/v1  # Ollama API address (optional)
```
**Quick platform switching:** Just change the value of `VITE_LLM_PROVIDER` to switch between platforms!
@@ -559,8 +608,8 @@ pnpm lint
XCodeReviewer is currently at the rapid-prototyping stage and its features will be improved step by step. Based on the project's evolution and community suggestions, the future development plan is as follows (to be implemented as soon as possible):
-- **✅ Multi-Platform LLM Support**: API calling implemented for 10+ mainstream platforms (Gemini, OpenAI, Claude, Qwen, DeepSeek, Zhipu AI, Kimi, ERNIE, MiniMax, Doubao), freely configurable and switchable
-- **Local Model Support**: Plan to add support for local models (such as Ollama) to meet data-privacy needs
+- **✅ Multi-Platform LLM Support**: API calling implemented for 10+ mainstream platforms (Gemini, OpenAI, Claude, Qwen, DeepSeek, Zhipu AI, Kimi, ERNIE, MiniMax, Doubao, plus Ollama local models), freely configurable and switchable
+- **✅ Local Model Support**: Added support for Ollama local models to meet data-privacy needs
- **Multi-Agent Collaboration**: Consider introducing a multi-agent collaboration architecture with `Agent + human dialogue` feedback, including multi-round dialogue display and human interruption and intervention, for a clearer, more transparent, and supervised audit process and higher audit quality
- **Professional Report Generation**: Generate professional audit reports in the formats each use case requires, with customizable report formats
- **Custom Audit Standards**: Different teams have their own coding standards and different projects have specific security requirements, which is exactly what we want to tackle next. The current version is still a "semi-black-box": prompt engineering guides the analysis direction and defines the audit standards, while the actual results are determined by the built-in knowledge of powerful pre-trained models. We will later combine reinforcement learning and supervised fine-tuning to support custom rule configuration, team-specific rules defined in YAML or JSON, and best-practice templates for common frameworks, producing audit results that better match each team's requirements and standards
diff --git a/README_EN.md b/README_EN.md
index 24d5abd..5bf5e8e 100644
--- a/README_EN.md
+++ b/README_EN.md
@@ -28,7 +28,7 @@ In the fast-paced world of software development, ensuring code quality is crucia
- **🤖 AI-Driven Deep Analysis**: Beyond traditional static analysis, understands code intent and discovers deep logical issues.
- **🎯 Multi-dimensional, Comprehensive Assessment**: From **security**, **performance**, **maintainability** to **code style**, providing 360-degree quality evaluation.
- **💡 Clear, Actionable Fix Suggestions**: Innovative **What-Why-How** approach that not only tells you "what" the problem is, but also explains "why" and provides "how to fix" with specific code examples.
-- **✅ Multi-Platform LLM Support**: Implemented API calling functionality for 10+ mainstream platforms (Gemini, OpenAI, Claude, Qwen, DeepSeek, Zhipu AI, Kimi, ERNIE, MiniMax, Doubao), with support for free configuration and switching
+- **✅ Multi-Platform LLM / Local Model Support**: Implemented API calling for 10+ mainstream platforms (Gemini, OpenAI, Claude, Qwen, DeepSeek, Zhipu AI, Kimi, ERNIE, MiniMax, Doubao, plus Ollama local models), freely configurable and switchable
- **✨ Modern, Beautiful User Interface**: Built with React + TypeScript, providing a smooth and intuitive user experience.
## 🎬 Project Demo
@@ -280,6 +280,46 @@ VITE_DEEPSEEK_API_KEY=deepseek_key
```
+
+Q: How do I use Ollama local models?
+
+Ollama allows you to run open-source models locally without an API key, protecting data privacy:
+
+**1. Install Ollama**
+```bash
+# macOS / Linux
+curl -fsSL https://ollama.com/install.sh | sh
+
+# Windows
+# Download and install: https://ollama.com/download
+```
+
+**2. Pull and run a model**
+```bash
+# Pull Llama3 model
+ollama pull llama3
+
+# Verify the model is available
+ollama list
+```
+
+**3. Configure XCodeReviewer**
+```env
+VITE_LLM_PROVIDER=ollama
+VITE_LLM_API_KEY=ollama # Can be any value
+VITE_LLM_MODEL=llama3 # Model name to use
+VITE_LLM_BASE_URL=http://localhost:11434/v1 # Ollama API address
+```
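+
+**4. Verify connectivity (optional)**
+
+A quick sanity check against Ollama's OpenAI-compatible endpoint (this assumes the default port 11434):
+```bash
+# Should return the locally installed models as JSON
+curl http://localhost:11434/v1/models
+```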
+
+**Recommended Models:**
+- `llama3` - Meta's open-source model with excellent performance
+- `codellama` - Code-optimized model
+- `qwen2.5` - Open-source version of Alibaba Qwen
+- `deepseek-coder` - DeepSeek's code-specialized model
+
+More models available at: https://ollama.com/library
+
+
### 🔑 Getting API Keys
@@ -301,6 +341,9 @@ XCodeReviewer now supports multiple mainstream LLM platforms. You can choose fre
- **MiniMax** [Get API Key](https://www.minimaxi.com/)
- **ByteDance Doubao (豆包)** [Get API Key](https://console.volcengine.com/ark)
+**Local Deployment:**
+- **Ollama** - Run open-source models locally, supports Llama3, Mistral, CodeLlama, etc. [Installation Guide](https://ollama.com/)
+
#### 📝 Configuration Examples
Configure your chosen platform in the `.env` file:
@@ -316,6 +359,12 @@ VITE_GEMINI_API_KEY=your_gemini_api_key
VITE_OPENAI_API_KEY=your_openai_api_key
VITE_CLAUDE_API_KEY=your_claude_api_key
# ... Other platform configurations
+
+# Using Ollama Local Models (No API Key Required)
+VITE_LLM_PROVIDER=ollama
+VITE_LLM_API_KEY=ollama # Can be any value
+VITE_LLM_MODEL=llama3 # Model name to use
+VITE_LLM_BASE_URL=http://localhost:11434/v1 # Ollama API address (optional)
```
**Quick Platform Switch:** Simply modify the value of `VITE_LLM_PROVIDER` to switch between different platforms!
@@ -566,8 +615,8 @@ We warmly welcome all forms of contributions! Whether it's submitting issues, cr
Currently, XCodeReviewer is at the rapid-prototyping stage, and its features will be improved step by step. Based on the project's evolution and community suggestions, the future development plan is as follows (to be implemented as soon as possible):
-- **✅ Multi-Platform LLM Support**: Implemented API calling functionality for 10+ mainstream platforms (Gemini, OpenAI, Claude, Qwen, DeepSeek, Zhipu AI, Kimi, ERNIE, MiniMax, Doubao), with support for free configuration and switching
-- **Local Model Support**: Planning to add support for local large models (such as Ollama) to meet data privacy requirements
+- **✅ Multi-Platform LLM Support**: Implemented API calling for 10+ mainstream platforms (Gemini, OpenAI, Claude, Qwen, DeepSeek, Zhipu AI, Kimi, ERNIE, MiniMax, Doubao, plus Ollama local models), freely configurable and switchable
+- **✅ Local Model Support**: Added support for Ollama local models to meet data-privacy requirements
- **Multi-Agent Collaboration**: Consider introducing a multi-agent collaboration architecture with `Agent + Human Dialogue` feedback, including multi-round dialogue display and human interruption and intervention, to achieve a clearer, more transparent, and supervised auditing process and improve audit quality.
- **Professional Report File Generation**: Generate professional audit reports in the formats each use case requires, with customizable report formats.
- **Custom Audit Standards**: Different teams have their own coding standards and different projects have specific security requirements, which is exactly what we want to tackle next. The current version is still in a "semi-black-box" mode: prompt engineering guides the analysis direction and defines the audit standards, while the actual results are determined by the built-in knowledge of powerful pre-trained models. In the future, we will combine reinforcement learning and supervised fine-tuning to support custom rule configuration, team-specific rules defined in YAML or JSON, and best-practice templates for common frameworks, producing audit results that better match each team's requirements and standards.
diff --git a/src/features/analysis/services/codeAnalysis.ts b/src/features/analysis/services/codeAnalysis.ts
index eea12ec..88e9ef5 100644
--- a/src/features/analysis/services/codeAnalysis.ts
+++ b/src/features/analysis/services/codeAnalysis.ts
@@ -80,6 +80,11 @@ export class CodeAnalysisEngine {
let text = '';
try {
+ console.log('🚀 Starting LLM analysis...');
+ console.log(`📡 Provider: ${env.LLM_PROVIDER}`);
+ console.log(`🤖 Model: ${getCurrentLLMModel()}`);
+ console.log(`🔗 Base URL: ${env.LLM_BASE_URL || '(default)'}`);
+
// Analyze with the new LLM service
const response = await llmService.complete({
messages: [
@@ -89,6 +94,10 @@ export class CodeAnalysisEngine {
temperature: 0.2,
});
text = response.content;
+
+ console.log('✅ LLM responded successfully');
+ console.log(`📊 Response length: ${text.length} characters`);
+ console.log(`📝 Response preview: ${text.substring(0, 200)}...`);
} catch (e: any) {
console.error('LLM analysis failed:', e);
@@ -109,10 +118,60 @@ export class CodeAnalysisEngine {
);
}
const parsed = this.safeParseJson(text);
+
+ // If parsing fails, throw an error instead of returning defaults
+ if (!parsed) {
+ const provider = env.LLM_PROVIDER;
+ const currentModel = getCurrentLLMModel();
+
+ let suggestions = '';
+ if (provider === 'ollama') {
+ suggestions =
+ `Suggested fixes:\n` +
+ `1. Upgrade to a stronger model (recommended):\n` +
+ ` ollama pull codellama\n` +
+ ` ollama pull qwen2.5:7b\n` +
+ `2. Update your .env file:\n` +
+ ` VITE_LLM_MODEL=codellama\n` +
+ `3. Restart the app and retry\n\n` +
+ `Note: ultra-light models are only good for connectivity tests; real use needs a stronger model.`;
+ } else {
+ suggestions =
+ `Suggested fixes:\n` +
+ `1. Try a more capable model (change VITE_LLM_MODEL in .env)\n` +
+ `2. Check whether the current model supports structured (JSON) output\n` +
+ `3. Try switching to another LLM provider:\n` +
+ ` - Gemini (generous free tier)\n` +
+ ` - OpenAI GPT (stable and reliable)\n` +
+ ` - Claude (strong code understanding)\n` +
+ ` - DeepSeek (great value)\n` +
+ `4. If you use a proxy, check that the network connection is stable\n` +
+ `5. Increase the timeout (VITE_LLM_TIMEOUT)`;
+ }
+
+ throw new Error(
+ `Failed to parse the LLM response\n\n` +
+ `Provider: ${provider}\n` +
+ `Model: ${currentModel || '(default)'}\n\n` +
+ `Reason: the model did not return valid JSON,\n` +
+ `likely because it is not capable enough or is misconfigured.\n\n` +
+ suggestions
+ );
+ }
+
+ console.log('🔍 Parse result:', {
+ hasIssues: Array.isArray(parsed?.issues),
+ issuesCount: parsed?.issues?.length || 0,
+ hasMetrics: !!parsed?.metrics,
+ hasQualityScore: !!parsed?.quality_score
+ });
const issues = Array.isArray(parsed?.issues) ? parsed.issues : [];
const metrics = parsed?.metrics ?? this.estimateMetricsFromIssues(issues);
const qualityScore = parsed?.quality_score ?? this.calculateQualityScore(metrics, issues);
+
+ console.log(`📋 Found ${issues.length} issues in total`);
+ console.log(`⭐ Quality score: ${qualityScore}`);
return {
issues,
@@ -129,13 +188,83 @@ export class CodeAnalysisEngine {
}
private static safeParseJson(text: string): any {
+ // Pre-processing: fix common non-standard JSON output
+ const fixJsonFormat = (str: string): string => {
+ // 1. Trim leading/trailing whitespace
+ str = str.trim();
+
+ // 2. Replace JavaScript template strings (backticks) with double quotes, handling multi-line content
+ // matches: "key": `multi-line content` => "key": "escaped content"
+ str = str.replace(/:\s*`([\s\S]*?)`/g, (match, content) => {
+ // Escape all special characters
+ let escaped = content
+ .replace(/\\/g, '\\\\') // backslash
+ .replace(/"/g, '\\"') // double quote
+ .replace(/\n/g, '\\n') // newline
+ .replace(/\r/g, '\\r') // carriage return
+ .replace(/\t/g, '\\t') // tab
+ .replace(/\f/g, '\\f') // form feed
+ .replace(/[\b]/g, '\\b'); // backspace ([\b] matches the backspace char; a bare \b is a word boundary)
+ return `: "${escaped}"`;
+ });
+
+ // 3. Escape raw control characters inside double-quoted strings (defensive)
+ // matches literal newlines/tabs inside double-quoted strings
+ str = str.replace(/"([^"]*?)"/g, (match, content) => {
+ if (content.includes('\n') || content.includes('\r') || content.includes('\t')) {
+ const escaped = content
+ .replace(/\n/g, '\\n')
+ .replace(/\r/g, '\\r')
+ .replace(/\t/g, '\\t')
+ .replace(/\f/g, '\\f')
+ .replace(/[\b]/g, '\\b');
+ return `"${escaped}"`;
+ }
+ return match;
+ });
+
+ // 4. Remove trailing commas (not allowed in JSON)
+ str = str.replace(/,(\s*[}\]])/g, '$1');
+
+ // 5. Insert missing commas between adjacent objects/arrays (e.g. "}{" or "][")
+ str = str.replace(/\}(\s*)\{/g, '},\n{');
+ str = str.replace(/\](\s*)\[/g, '],\n[');
+
+ return str;
+ };
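+
+ // Illustrative (hypothetical) example of what fixJsonFormat repairs:
+ //   input:  {"fix": `line1
+ //           line2`,}
+ //   output: {"fix": "line1\nline2"}
+ // Backticked values become escaped double-quoted strings and the trailing
+ // comma is dropped, so JSON.parse can then succeed.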
+
try {
- return JSON.parse(text);
- } catch {
- const match = text.match(/\{[\s\S]*\}/);
- if (match) {
- try { return JSON.parse(match[0]); } catch {}
+ // First try to fix the text, then parse it directly
+ const fixed = fixJsonFormat(text);
+ return JSON.parse(fixed);
+ } catch (e1) {
+ // On failure, try extracting a JSON object
+ try {
+ const match = text.match(/\{[\s\S]*\}/);
+ if (match) {
+ const fixed = fixJsonFormat(match[0]);
+ return JSON.parse(fixed);
+ }
+ } catch (e2) {
+ console.warn('Parsing still failed after extracting a JSON object:', e2);
}
+
+ // Try stripping markdown code-fence markers
+ try {
+ const codeBlockMatch = text.match(/```(?:json)?\s*(\{[\s\S]*\})\s*```/);
+ if (codeBlockMatch) {
+ const fixed = fixJsonFormat(codeBlockMatch[1]);
+ return JSON.parse(fixed);
+ }
+ } catch (e3) {
+ console.warn('Failed to extract JSON from a code block:', e3);
+ }
+
+ console.error('⚠️ Could not parse the LLM response as JSON');
+ console.error('Raw content (first 500 chars):', text.substring(0, 500));
+ console.error('Parse error:', e1);
+ console.warn('💡 Hint: the current model may be unable to produce valid JSON');
+ console.warn(' Suggestion: switch to a stronger model or a different LLM provider');
return null;
}
}
diff --git a/src/shared/config/env.ts b/src/shared/config/env.ts
index b93af67..51cf9b0 100644
--- a/src/shared/config/env.ts
+++ b/src/shared/config/env.ts
@@ -1,7 +1,7 @@
// Environment variable configuration
export const env = {
// ==================== General LLM Configuration ====================
- // Active LLM provider (gemini|openai|claude|qwen|deepseek|zhipu|moonshot|baidu|minimax|doubao)
+ // Active LLM provider (gemini|openai|claude|qwen|deepseek|zhipu|moonshot|baidu|minimax|doubao|ollama)
LLM_PROVIDER: import.meta.env.VITE_LLM_PROVIDER || 'gemini',
// LLM API Key
LLM_API_KEY: import.meta.env.VITE_LLM_API_KEY || '',
@@ -58,6 +58,11 @@ export const env = {
DOUBAO_API_KEY: import.meta.env.VITE_DOUBAO_API_KEY || '',
DOUBAO_MODEL: import.meta.env.VITE_DOUBAO_MODEL || 'doubao-pro-32k',
+ // ==================== Ollama Local Model Configuration ====================
+ OLLAMA_API_KEY: import.meta.env.VITE_OLLAMA_API_KEY || 'ollama',
+ OLLAMA_MODEL: import.meta.env.VITE_OLLAMA_MODEL || 'llama3',
+ OLLAMA_BASE_URL: import.meta.env.VITE_OLLAMA_BASE_URL || 'http://localhost:11434/v1',
+
// ==================== Supabase Configuration ====================
SUPABASE_URL: import.meta.env.VITE_SUPABASE_URL || '',
SUPABASE_ANON_KEY: import.meta.env.VITE_SUPABASE_ANON_KEY || '',
@@ -101,6 +106,7 @@ export function getCurrentLLMApiKey(): string {
baidu: env.BAIDU_API_KEY,
minimax: env.MINIMAX_API_KEY,
doubao: env.DOUBAO_API_KEY,
+ ollama: env.OLLAMA_API_KEY,
};
return providerKeyMap[provider] || '';
@@ -129,6 +135,7 @@ export function getCurrentLLMModel(): string {
baidu: env.BAIDU_MODEL,
minimax: env.MINIMAX_MODEL,
doubao: env.DOUBAO_MODEL,
+ ollama: env.OLLAMA_MODEL,
};
return providerModelMap[provider] || '';
diff --git a/src/shared/services/llm/adapters/index.ts b/src/shared/services/llm/adapters/index.ts
index b4bf036..c25ca03 100644
--- a/src/shared/services/llm/adapters/index.ts
+++ b/src/shared/services/llm/adapters/index.ts
@@ -12,4 +12,5 @@ export { MoonshotAdapter } from './moonshot-adapter';
export { BaiduAdapter } from './baidu-adapter';
export { MinimaxAdapter } from './minimax-adapter';
export { DoubaoAdapter } from './doubao-adapter';
+export { OllamaAdapter } from './ollama-adapter';
diff --git a/src/shared/services/llm/adapters/ollama-adapter.ts b/src/shared/services/llm/adapters/ollama-adapter.ts
new file mode 100644
index 0000000..2eeae87
--- /dev/null
+++ b/src/shared/services/llm/adapters/ollama-adapter.ts
@@ -0,0 +1,94 @@
+/**
+ * Ollama adapter - supports locally run open-source models
+ * Ollama uses an OpenAI-compatible API format
+ */
+
+import { BaseLLMAdapter } from '../base-adapter';
+import type { LLMRequest, LLMResponse } from '../types';
+
+export class OllamaAdapter extends BaseLLMAdapter {
+ private baseUrl: string;
+
+ constructor(config: any) {
+ super(config);
+ this.baseUrl = config.baseUrl || 'http://localhost:11434/v1';
+ }
+
+ async complete(request: LLMRequest): Promise<LLMResponse> {
+ try {
+ // Ollama does not require an API Key, but we still validate the config
+ await this.validateConfig();
+
+ return await this.retry(async () => {
+ return await this.withTimeout(this._sendRequest(request));
+ });
+ } catch (error) {
+ this.handleError(error, 'Ollama API call failed');
+ }
+ }
+
+ private async _sendRequest(request: LLMRequest): Promise<LLMResponse> {
+ const headers: Record<string, string> = {
+ 'Content-Type': 'application/json',
+ };
+
+ // If an API Key is configured, send it in the headers (some Ollama deployments may require one)
+ if (this.config.apiKey && this.config.apiKey !== 'ollama') {
+ headers['Authorization'] = `Bearer ${this.config.apiKey}`;
+ }
+
+ const response = await fetch(`${this.baseUrl}/chat/completions`, {
+ method: 'POST',
+ headers,
+ body: JSON.stringify({
+ model: this.config.model,
+ messages: request.messages,
+ temperature: request.temperature ?? this.config.temperature,
+ max_tokens: request.maxTokens ?? this.config.maxTokens,
+ top_p: request.topP ?? this.config.topP,
+ stream: false,
+ }),
+ });
+
+ if (!response.ok) {
+ const error = await response.json().catch(() => ({}));
+ throw {
+ statusCode: response.status,
+ message: error.error?.message || `HTTP ${response.status}: ${response.statusText}`,
+ };
+ }
+
+ const data = await response.json();
+ const choice = data.choices?.[0];
+
+ if (!choice) {
+ throw new Error('Malformed API response: missing choices field');
+ }
+
+ return {
+ content: choice.message?.content || '',
+ model: data.model,
+ usage: data.usage ? {
+ promptTokens: data.usage.prompt_tokens || 0,
+ completionTokens: data.usage.completion_tokens || 0,
+ totalTokens: data.usage.total_tokens || 0,
+ } : undefined,
+ finishReason: choice.finish_reason,
+ };
+ }
+
+ /**
+ * Ollama does not require an API Key
+ * Any placeholder string works, or it can be left unset
+ */
+ async validateConfig(): Promise<boolean> {
+ if (!this.config.model) {
+ throw new Error('No Ollama model specified');
+ }
+
+ // A local Ollama run needs no API Key validation,
+ // but if one is configured we keep it for compatibility
+ return true;
+ }
+}
+
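+// Minimal usage sketch (illustrative only; assumes the config shape used
+// above: { model, apiKey, baseUrl } accepted by BaseLLMAdapter):
+//
+//   const ollama = new OllamaAdapter({
+//     model: 'llama3',
+//     apiKey: 'ollama', // placeholder; ignored by a local Ollama
+//     baseUrl: 'http://localhost:11434/v1',
+//   });
+//   const res = await ollama.complete({
+//     messages: [{ role: 'user', content: 'Say hello' }],
+//   });
+//   console.log(res.content);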
diff --git a/src/shared/services/llm/llm-factory.ts b/src/shared/services/llm/llm-factory.ts
index f48e484..f94dc8d 100644
--- a/src/shared/services/llm/llm-factory.ts
+++ b/src/shared/services/llm/llm-factory.ts
@@ -15,6 +15,7 @@ import {
BaiduAdapter,
MinimaxAdapter,
DoubaoAdapter,
+ OllamaAdapter,
} from './adapters';
/**
@@ -83,6 +84,9 @@ export class LLMFactory {
case 'doubao':
return new DoubaoAdapter(config);
+ case 'ollama':
+ return new OllamaAdapter(config);
+
default:
throw new Error(`Unsupported LLM provider: ${config.provider}`);
}
@@ -117,6 +121,7 @@ export class LLMFactory {
'baidu',
'minimax',
'doubao',
+ 'ollama',
];
}
@@ -193,6 +198,17 @@ export class LLMFactory {
'doubao-lite-32k',
'doubao-lite-128k',
],
+ ollama: [
+ 'llama3',
+ 'llama3.1',
+ 'llama3.2',
+ 'mistral',
+ 'codellama',
+ 'qwen2.5',
+ 'gemma2',
+ 'phi3',
+ 'deepseek-coder',
+ ],
};
return models[provider] || [];
@@ -213,6 +229,7 @@ export class LLMFactory {
baidu: 'Baidu ERNIE Bot',
minimax: 'MiniMax',
doubao: 'ByteDance Doubao',
+ ollama: 'Ollama (local models)',
};
return names[provider] || provider;
diff --git a/src/shared/services/llm/llm-service.ts b/src/shared/services/llm/llm-service.ts
index 08aab1b..e4ba19d 100644
--- a/src/shared/services/llm/llm-service.ts
+++ b/src/shared/services/llm/llm-service.ts
@@ -72,11 +72,19 @@ export class LLMService {
throw new Error('No LLM API Key configured; set it in your environment variables');
}
+ // Resolve baseUrl: prefer the generic setting, then fall back to the provider-specific one
+ let baseUrl = env.LLM_BASE_URL;
+ if (!baseUrl && provider === 'openai') {
+ baseUrl = env.OPENAI_BASE_URL;
+ } else if (!baseUrl && provider === 'ollama') {
+ baseUrl = env.OLLAMA_BASE_URL;
+ }
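+ // Resolution example (illustrative): with provider=ollama and no
+ // VITE_LLM_BASE_URL set, baseUrl comes from VITE_OLLAMA_BASE_URL, which
+ // itself defaults to http://localhost:11434/v1 in env.ts.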
+
const config: LLMConfig = {
provider,
apiKey,
model,
- baseUrl: env.LLM_BASE_URL,
+ baseUrl,
timeout: env.LLM_TIMEOUT || env.GEMINI_TIMEOUT_MS,
temperature: env.LLM_TEMPERATURE,
maxTokens: env.LLM_MAX_TOKENS,
diff --git a/src/shared/services/llm/types.ts b/src/shared/services/llm/types.ts
index 1829c28..9430b20 100644
--- a/src/shared/services/llm/types.ts
+++ b/src/shared/services/llm/types.ts
@@ -13,7 +13,8 @@ export type LLMProvider =
| 'moonshot' // Moonshot Kimi
| 'baidu' // Baidu ERNIE
| 'minimax' // MiniMax
- | 'doubao'; // ByteDance Doubao
+ | 'doubao' // ByteDance Doubao
+ | 'ollama'; // Ollama local models
// LLM config interface
export interface LLMConfig {
@@ -119,6 +120,7 @@ export const DEFAULT_MODELS: Record<LLMProvider, string> = {
baidu: 'ERNIE-3.5-8K',
minimax: 'abab6.5-chat',
doubao: 'doubao-pro-32k',
+ ollama: 'llama3',
};
// Per-platform API endpoints
@@ -131,5 +133,6 @@ export const DEFAULT_BASE_URLS: Partial<Record<LLMProvider, string>> = {
baidu: 'https://aip.baidubce.com/rpc/2.0/ai_custom/v1',
minimax: 'https://api.minimax.chat/v1',
doubao: 'https://ark.cn-beijing.volces.com/api/v3',
+ ollama: 'http://localhost:11434/v1',
};