# ========================================
# XCodeReviewer Environment Variables Example
# ========================================
# Copy this file to .env and fill in your configuration
# ==================== Common LLM Settings ====================
# Choose the LLM provider to use (gemini|openai|claude|qwen|deepseek|zhipu|moonshot|baidu|minimax|doubao|ollama)
VITE_LLM_PROVIDER=gemini
# Generic LLM settings (optional; if set, these override the provider-specific settings below)
# VITE_LLM_API_KEY=your_api_key_here
# VITE_LLM_MODEL=your_model_name
# VITE_LLM_BASE_URL=https://your-proxy.com/v1 # API relay/proxy base URL; works with all providers
# VITE_LLM_TIMEOUT=150000
# VITE_LLM_TEMPERATURE=0.2
# VITE_LLM_MAX_TOKENS=4096
# VITE_LLM_CUSTOM_HEADERS={"X-Custom-Header":"value"} # Custom request headers in JSON format
# ==================== Google Gemini Settings ====================
# Get an API key: https://makersuite.google.com/app/apikey
# Note: Gemini also works through an API relay; just set the relay address in VITE_LLM_BASE_URL above
# VITE_GEMINI_API_KEY=your_gemini_api_key_here
# VITE_GEMINI_MODEL=gemini-1.5-flash
# VITE_GEMINI_TIMEOUT_MS=150000
# ==================== OpenAI Settings ====================
# Get an API key: https://platform.openai.com/api-keys
# VITE_OPENAI_API_KEY=your_openai_api_key_here
# VITE_OPENAI_MODEL=gpt-4o-mini
# VITE_OPENAI_BASE_URL=https://api.openai.com/v1
# ==================== Anthropic Claude Settings ====================
# Get an API key: https://console.anthropic.com/
# VITE_CLAUDE_API_KEY=your_claude_api_key_here
# VITE_CLAUDE_MODEL=claude-3-5-sonnet-20241022
# ==================== Alibaba Cloud Tongyi Qianwen (Qwen) Settings ====================
# Get an API key: https://dashscope.console.aliyun.com/
# VITE_QWEN_API_KEY=your_qwen_api_key_here
# VITE_QWEN_MODEL=qwen-turbo
# ==================== DeepSeek Settings ====================
# Get an API key: https://platform.deepseek.com/
# VITE_DEEPSEEK_API_KEY=your_deepseek_api_key_here
# VITE_DEEPSEEK_MODEL=deepseek-chat
# ==================== Zhipu AI (GLM) Settings ====================
# Get an API key: https://open.bigmodel.cn/
# VITE_ZHIPU_API_KEY=your_zhipu_api_key_here
# VITE_ZHIPU_MODEL=glm-4-flash
# ==================== Moonshot (Kimi) Settings ====================
# Get an API key: https://platform.moonshot.cn/
# VITE_MOONSHOT_API_KEY=your_moonshot_api_key_here
# VITE_MOONSHOT_MODEL=moonshot-v1-8k
# ==================== Baidu ERNIE Bot (Wenxin Yiyan) Settings ====================
# Get an API key: https://console.bce.baidu.com/qianfan/
# Note: the Baidu API key must use the format "API_KEY:SECRET_KEY"
# VITE_BAIDU_API_KEY=your_api_key:your_secret_key
# VITE_BAIDU_MODEL=ERNIE-3.5-8K
# ==================== MiniMax Settings ====================
# Get an API key: https://www.minimaxi.com/
# VITE_MINIMAX_API_KEY=your_minimax_api_key_here
# VITE_MINIMAX_MODEL=abab6.5-chat
# ==================== ByteDance Doubao Settings ====================
# Get an API key: https://console.volcengine.com/ark
# Note: Doubao uses an endpoint ID; create an inference endpoint first
# VITE_DOUBAO_API_KEY=your_doubao_api_key_here
# VITE_DOUBAO_MODEL=doubao-pro-32k
# ==================== Ollama Local Model Settings ====================
# Ollama runs open-source models locally, with no API key required
# Install: https://ollama.com/
# Quick start:
# 1. Install Ollama: curl -fsSL https://ollama.com/install.sh | sh
# 2. Pull a model: ollama pull llama3
# 3. Set the variables below and start the app
# VITE_OLLAMA_API_KEY=ollama # No real key is needed for local use; any value works
# VITE_OLLAMA_MODEL=llama3
# VITE_OLLAMA_BASE_URL=http://localhost:11434/v1
#
# Recommended models:
# - llama3 (strong all-around model, suitable for most tasks)
# - codellama (code-focused, good for code review)
# - qwen2.5:7b (good Chinese-language support)
# - deepseek-coder (strong code comprehension)
# - phi3:mini (lightweight and fast)
#
# More models: https://ollama.com/library
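#
# To confirm the local Ollama server is reachable (an optional sanity check,
# assuming the default port 11434), list the installed models:
# curl http://localhost:11434/api/tags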
# ==================== Database Settings (local database recommended) ====================
# Option 1: local database (recommended, works out of the box)
VITE_USE_LOCAL_DB=true
# Option 2: Supabase cloud database (supports multi-device sync)
# If left unconfigured, the app runs in demo mode and data is not persisted
# Get your project credentials: https://supabase.com/
# VITE_SUPABASE_URL=https://your-project.supabase.co
# VITE_SUPABASE_ANON_KEY=your-anon-key-here
# ==================== Git Repository Integration (optional) ====================
# Used to access private repositories for code audits
# GitHub Token
# Create a token: https://github.com/settings/tokens
# Required scope: repo (access to private repositories)
# VITE_GITHUB_TOKEN=ghp_your_github_token_here
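# To verify a GitHub token before using it (an optional check, not something the app
# requires; substitute your real token for the placeholder):
# curl -H "Authorization: Bearer ghp_your_github_token_here" https://api.github.com/user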
# GitLab Token
# Create a token: https://gitlab.com/-/profile/personal_access_tokens
# Required scopes: read_api, read_repository
# VITE_GITLAB_TOKEN=glpat-your_gitlab_token_here
# 💡 Tips:
# 1. Public repositories do not require a token
# 2. Private repositories, or access from inside a container, need the matching token
# 3. Self-hosted GitLab servers are supported (the token format is the same)
# ==================== Application Settings ====================
VITE_APP_ID=xcodereviewer
# ==================== Code Analysis Settings ====================
VITE_MAX_ANALYZE_FILES=40
VITE_LLM_CONCURRENCY=2
VITE_LLM_GAP_MS=500
VITE_OUTPUT_LANGUAGE=zh-CN # zh-CN: Chinese | en-US: English
# ========================================
# API Relay Examples (recommended)
# ========================================
# Most users access LLMs through an API relay; common configurations are shown below
# Example 1: SiliconFlow relay (OpenAI-compatible format)
# VITE_LLM_PROVIDER=openai
# VITE_LLM_API_KEY=sk-your_siliconflow_key
# VITE_LLM_MODEL=deepseek-ai/DeepSeek-V3
# VITE_LLM_BASE_URL=https://api.siliconflow.cn/v1
# Example 2: OpenRouter (supports all models)
# VITE_LLM_PROVIDER=openai
# VITE_LLM_API_KEY=sk-or-your_openrouter_key
# VITE_LLM_MODEL=anthropic/claude-3.5-sonnet
# VITE_LLM_BASE_URL=https://openrouter.ai/api/v1
# Example 3: Gemini relay (Gemini format)
# VITE_LLM_PROVIDER=gemini
# VITE_LLM_API_KEY=your_relay_key
# VITE_LLM_MODEL=gemini-1.5-flash
# VITE_LLM_BASE_URL=https://your-gemini-relay.com/v1beta
# Example 4: self-hosted service + custom request headers
# VITE_LLM_PROVIDER=openai
# VITE_LLM_API_KEY=your-custom-key
# VITE_LLM_MODEL=custom-model
# VITE_LLM_BASE_URL=https://your-server.com/v1
# VITE_LLM_CUSTOM_HEADERS={"X-API-Version":"v1","X-Team-ID":"team123"}
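# To test whether an OpenAI-compatible base URL is reachable (an optional check;
# most, but not all, OpenAI-compatible relays expose the standard /v1/models endpoint;
# replace the placeholder key and URL with your own):
# curl -H "Authorization: Bearer your-custom-key" https://your-server.com/v1/models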
# ========================================
# Important Notes
# ========================================
# 1. Runtime configuration is recommended: configure in the browser instead of editing this file
# Go to http://localhost:8888/admin → System Configuration tab
#
# 2. API relay URL formats:
# - OpenAI-compatible format: URLs usually end with /v1
# - Gemini format: URLs usually end with /v1beta
# - Claude format: URLs usually end with /v1
#
# 3. Supported API formats:
# - OpenAI-compatible format (most common; 90%+ of relays)
# - Gemini format (official Google Gemini and compatible services)
# - Claude format (official Anthropic Claude and compatible services)