// NOTE(review): This TSX file has been mechanically mangled: the source was
// collapsed onto a handful of very long physical lines, and the JSX markup in
// the render section (everything after the component's `return (`) has been
// stripped, leaving only text content, attribute expressions, and comments.
// As found, the file does not compile:
//   - the `// LLM provider...` line comment mid-way through the first physical
//     line swallows the `const LLM_PROVIDERS` and `const DEFAULT_MODELS`
//     declarations that follow it on the same line;
//   - `DEFAULT_MODELS: Record` is missing its type arguments (presumably
//     Record<string, string> — TODO confirm against version control);
//   - the render body contains bare text where JSX elements once stood.
// The component logic (state, config load/save/reset, LLM connection test) is
// still readable and is documented below; the original markup cannot be
// reconstructed from this residue and must be restored from version control.
//
// Imports: React state/effect hooks, shadcn/ui-style components
// (Button/Input/Label/Select/Tabs), lucide-react icons, the `sonner` toast
// API, and the project-local `api` client (@/shared/api/database).
// LLM_PROVIDERS: per-provider display metadata. `category` is 'litellm'
// (routed through LiteLLM) or 'native' (dedicated adapter) per the usage notes
// rendered later in this file; `hint` is a user-facing example-model string.
// DEFAULT_MODELS (spans onto the next physical line): provider id -> default
// model name, used as the model-input placeholder in the render section.
import { useState, useEffect } from "react"; import { Button } from "@/components/ui/button"; import { Input } from "@/components/ui/input"; import { Label } from "@/components/ui/label"; import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"; import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"; import { Settings, Save, RotateCcw, Eye, EyeOff, CheckCircle2, AlertCircle, Info, Zap, Globe, PlayCircle, Loader2 } from "lucide-react"; import { toast } from "sonner"; import { api } from "@/shared/api/database"; // LLM provider configuration - simplified categories const LLM_PROVIDERS = [ { value: 'openai', label: 'OpenAI GPT', icon: '🟢', category: 'litellm', hint: 'gpt-4o, gpt-4o-mini 等' }, { value: 'claude', label: 'Anthropic Claude', icon: '🟣', category: 'litellm', hint: 'claude-3.5-sonnet 等' }, { value: 'gemini', label: 'Google Gemini', icon: '🔵', category: 'litellm', hint: 'gemini-1.5-flash 等' }, { value: 'deepseek', label: 'DeepSeek', icon: '🔷', category: 'litellm', hint: 'deepseek-chat, deepseek-coder' }, { value: 'qwen', label: '通义千问', icon: '🟠', category: 'litellm', hint: 'qwen-turbo, qwen-max 等' }, { value: 'zhipu', label: '智谱AI (GLM)', icon: '🔴', category: 'litellm', hint: 'glm-4-flash, glm-4 等' }, { value: 'moonshot', label: 'Moonshot (Kimi)', icon: '🌙', category: 'litellm', hint: 'moonshot-v1-8k 等' }, { value: 'ollama', label: 'Ollama 本地', icon: '🖥️', category: 'litellm', hint: 'llama3, codellama 等' }, { value: 'baidu', label: '百度文心', icon: '📘', category: 'native', hint: 'ERNIE-3.5-8K (需要 API_KEY:SECRET_KEY)' }, { value: 'minimax', label: 'MiniMax', icon: '⚡', category: 'native', hint: 'abab6.5-chat 等' }, { value: 'doubao', label: '字节豆包', icon: '🎯', category: 'native', hint: 'doubao-pro-32k 等' }, ]; const DEFAULT_MODELS: Record = { openai: 'gpt-4o-mini', claude: 'claude-3-5-sonnet-20241022', gemini: 'gemini-2.5-flash', deepseek: 'deepseek-chat', qwen: 'qwen-turbo', zhipu: 'glm-4-flash', moonshot: 'moonshot-v1-8k', 
// Remainder of DEFAULT_MODELS, then SystemConfigData — the flat client-side
// shape of the persisted settings; saveConfig() below splits it back into the
// { llmConfig, otherConfig } structure that api.updateUserConfig expects.
// SystemConfig component begins here: state hooks (note `showApiKey` and the
// Eye/EyeOff icons are unused in the surviving residue — presumably they drove
// an API-key visibility toggle in the stripped markup; verify in VCS), then
// loadConfig(), which merges the user's saved backend config over the server
// defaults. Most fields use `||` fallbacks (so a stored 0/'' would fall back
// to the default), while llmTemperature deliberately uses `??` so an explicit
// temperature of 0 is preserved.
ollama: 'llama3', baidu: 'ERNIE-3.5-8K', minimax: 'abab6.5-chat', doubao: 'doubao-pro-32k', }; interface SystemConfigData { llmProvider: string; llmApiKey: string; llmModel: string; llmBaseUrl: string; llmTimeout: number; llmTemperature: number; llmMaxTokens: number; githubToken: string; gitlabToken: string; maxAnalyzeFiles: number; llmConcurrency: number; llmGapMs: number; outputLanguage: string; } export function SystemConfig() { const [config, setConfig] = useState(null); const [loading, setLoading] = useState(true); const [showApiKey, setShowApiKey] = useState(false); const [hasChanges, setHasChanges] = useState(false); const [testingLLM, setTestingLLM] = useState(false); const [llmTestResult, setLlmTestResult] = useState<{ success: boolean; message: string } | null>(null); useEffect(() => { loadConfig(); }, []); const loadConfig = async () => { try { setLoading(true); const defaultConfig = await api.getDefaultConfig(); const backendConfig = await api.getUserConfig(); const merged: SystemConfigData = { llmProvider: backendConfig?.llmConfig?.llmProvider || defaultConfig?.llmConfig?.llmProvider || 'openai', llmApiKey: backendConfig?.llmConfig?.llmApiKey || '', llmModel: backendConfig?.llmConfig?.llmModel || '', llmBaseUrl: backendConfig?.llmConfig?.llmBaseUrl || '', llmTimeout: backendConfig?.llmConfig?.llmTimeout || defaultConfig?.llmConfig?.llmTimeout || 150000, llmTemperature: backendConfig?.llmConfig?.llmTemperature ?? defaultConfig?.llmConfig?.llmTemperature ?? 
// loadConfig continues: remaining merged fields; if any fetch throws, a fully
// hard-coded fallback config is installed (same defaults as the merge path)
// so the UI stays usable. Then saveConfig(): regroups the flat state into
// llmConfig/otherConfig for api.updateUserConfig, clears the dirty flag, and
// reports success/failure via toast.
0.1, llmMaxTokens: backendConfig?.llmConfig?.llmMaxTokens || defaultConfig?.llmConfig?.llmMaxTokens || 4096, githubToken: backendConfig?.otherConfig?.githubToken || '', gitlabToken: backendConfig?.otherConfig?.gitlabToken || '', maxAnalyzeFiles: backendConfig?.otherConfig?.maxAnalyzeFiles || defaultConfig?.otherConfig?.maxAnalyzeFiles || 50, llmConcurrency: backendConfig?.otherConfig?.llmConcurrency || defaultConfig?.otherConfig?.llmConcurrency || 3, llmGapMs: backendConfig?.otherConfig?.llmGapMs || defaultConfig?.otherConfig?.llmGapMs || 2000, outputLanguage: backendConfig?.otherConfig?.outputLanguage || defaultConfig?.otherConfig?.outputLanguage || 'zh-CN', }; setConfig(merged); } catch (error) { console.error('Failed to load config:', error); setConfig({ llmProvider: 'openai', llmApiKey: '', llmModel: '', llmBaseUrl: '', llmTimeout: 150000, llmTemperature: 0.1, llmMaxTokens: 4096, githubToken: '', gitlabToken: '', maxAnalyzeFiles: 50, llmConcurrency: 3, llmGapMs: 2000, outputLanguage: 'zh-CN', }); } finally { setLoading(false); } }; const saveConfig = async () => { if (!config) return; try { await api.updateUserConfig({ llmConfig: { llmProvider: config.llmProvider, llmApiKey: config.llmApiKey, llmModel: config.llmModel, llmBaseUrl: config.llmBaseUrl, llmTimeout: config.llmTimeout, llmTemperature: config.llmTemperature, llmMaxTokens: config.llmMaxTokens, }, otherConfig: { githubToken: config.githubToken, gitlabToken: config.gitlabToken, maxAnalyzeFiles: config.maxAnalyzeFiles, llmConcurrency: config.llmConcurrency, llmGapMs: config.llmGapMs, outputLanguage: config.outputLanguage, }, }); setHasChanges(false); toast.success("配置已保存!"); } catch (error) { toast.error(`保存失败: ${error instanceof Error ? 
// resetConfig(): window.confirm guard, delete the stored user config, then
// reload (falling back to server defaults) and clear the dirty flag.
// updateConfig(): single-field updater over the flat state; also marks the
// form dirty. testLLMConnection(): requires an API key for every provider
// except local 'ollama'; empty model/baseUrl are sent as undefined so the
// backend can apply its own defaults; result (or thrown error) is recorded in
// llmTestResult and surfaced via toast. The trailing `return (` opens the
// loading-state render output.
error.message : '未知错误'}`); } }; const resetConfig = async () => { if (!window.confirm("确定要重置为默认配置吗?")) return; try { await api.deleteUserConfig(); await loadConfig(); setHasChanges(false); toast.success("已重置为默认配置"); } catch (error) { toast.error(`重置失败: ${error instanceof Error ? error.message : '未知错误'}`); } }; const updateConfig = (key: keyof SystemConfigData, value: string | number) => { if (!config) return; setConfig(prev => prev ? { ...prev, [key]: value } : null); setHasChanges(true); }; const testLLMConnection = async () => { if (!config) return; if (!config.llmApiKey && config.llmProvider !== 'ollama') { toast.error('请先配置 API Key'); return; } setTestingLLM(true); setLlmTestResult(null); try { const result = await api.testLLMConnection({ provider: config.llmProvider, apiKey: config.llmApiKey, model: config.llmModel || undefined, baseUrl: config.llmBaseUrl || undefined, }); setLlmTestResult(result); if (result.success) toast.success(`连接成功!模型: ${result.model}`); else toast.error(`连接失败: ${result.message}`); } catch (error) { const msg = error instanceof Error ? error.message : '未知错误'; setLlmTestResult({ success: false, message: msg }); toast.error(`测试失败: ${msg}`); } finally { setTestingLLM(false); } }; if (loading || !config) { return (
// ---- Render section: JSX tags stripped; residue follows. Loading state
// (spinner + "加载配置中..." text) while the config fetch is in flight. ----

加载配置中...

// Derived render values: metadata of the selected provider, and whether the
// component counts itself configured (API key present, or provider 'ollama',
// which needs no key). The `return (` opens the main render output.
); } const currentProvider = LLM_PROVIDERS.find(p => p.value === config.llmProvider); const isConfigured = config.llmApiKey !== '' || config.llmProvider === 'ollama'; return (
{/* Status bar */}
{isConfigured ? ( LLM 已配置 ({currentProvider?.label}) ) : ( 请配置 LLM API Key )}
{hasChanges && ( )}
// Three tabs: LLM config / analysis parameters / Git integration.
LLM 配置 分析参数 Git 集成 {/* LLM config - simplified */}
{/* Provider selection */}
{/* API Key */} {config.llmProvider !== 'ollama' && (
// API-key input; baidu expects the combined "API_KEY:SECRET_KEY" format.
updateConfig('llmApiKey', e.target.value)} placeholder={config.llmProvider === 'baidu' ? 'API_KEY:SECRET_KEY 格式' : '输入你的 API Key'} className="h-12 bg-gray-50 border-2 border-black rounded-none font-mono" />
)} {/* Model and Base URL */}
// Model override (placeholder shows the DEFAULT_MODELS entry) and custom
// Base URL (for proxy/relay endpoints).
updateConfig('llmModel', e.target.value)} placeholder={`默认: ${DEFAULT_MODELS[config.llmProvider] || 'auto'}`} className="h-10 bg-gray-50 border-2 border-black rounded-none font-mono" />
updateConfig('llmBaseUrl', e.target.value)} placeholder="留空使用官方地址,或填入中转站地址" className="h-10 bg-gray-50 border-2 border-black rounded-none font-mono" />
{/* Test connection */}
测试连接 验证配置是否正确
{llmTestResult && (
// Test-result banner: success/failure icon plus the returned message.
{llmTestResult.success ? : } {llmTestResult.message}
)} {/* Advanced parameters - collapsible */}
高级参数
// Advanced numeric inputs: timeout (ms), temperature, max tokens.
updateConfig('llmTimeout', Number(e.target.value))} className="h-10 bg-gray-50 border-2 border-black rounded-none font-mono" />
updateConfig('llmTemperature', Number(e.target.value))} className="h-10 bg-gray-50 border-2 border-black rounded-none font-mono" />
updateConfig('llmMaxTokens', Number(e.target.value))} className="h-10 bg-gray-50 border-2 border-black rounded-none font-mono" />
{/* Usage notes */}
// Static explanatory copy: LiteLLM routing, native adapters, relay endpoints.

💡 配置说明

LiteLLM 统一适配: 大多数提供商通过 LiteLLM 统一处理,支持自动重试和负载均衡

原生适配器: 百度、MiniMax、豆包因 API 格式特殊,使用专用适配器

API 中转站: 在 Base URL 填入中转站地址即可,API Key 填中转站提供的 Key

{/* Analysis parameters */}
// Analysis tab: max files per task, LLM request concurrency, per-request gap
// (ms), and output language for review results.
updateConfig('maxAnalyzeFiles', Number(e.target.value))} className="h-10 bg-gray-50 border-2 border-black rounded-none font-mono" />

单次任务最多处理的文件数量

updateConfig('llmConcurrency', Number(e.target.value))} className="h-10 bg-gray-50 border-2 border-black rounded-none font-mono" />

同时发送的 LLM 请求数量

updateConfig('llmGapMs', Number(e.target.value))} className="h-10 bg-gray-50 border-2 border-black rounded-none font-mono" />

每个请求之间的延迟时间

代码审查结果的输出语言

{/* Git integration */}
// Git tab: GitHub / GitLab personal access tokens for private repositories,
// with pointers to where each token is created.
updateConfig('githubToken', e.target.value)} placeholder="ghp_xxxxxxxxxxxx" className="h-10 bg-gray-50 border-2 border-black rounded-none font-mono" />

用于访问私有仓库。获取: github.com/settings/tokens

updateConfig('gitlabToken', e.target.value)} placeholder="glpat-xxxxxxxxxxxx" className="h-10 bg-gray-50 border-2 border-black rounded-none font-mono" />

用于访问私有仓库。获取: gitlab.com/-/profile/personal_access_tokens

💡 提示

• 公开仓库无需配置 Token

• 私有仓库需要配置对应平台的 Token

{/* Bottom save button */} {hasChanges && (
)}
); }