/**
 * System Config Component
 * Cyberpunk Terminal Aesthetic
 */
import { useState, useEffect } from "react";
import { Button } from "@/components/ui/button";
import { Input } from "@/components/ui/input";
import { Label } from "@/components/ui/label";
import {
  Select,
  SelectContent,
  SelectItem,
  SelectTrigger,
  SelectValue,
} from "@/components/ui/select";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import {
  Settings,
  Save,
  RotateCcw,
  Eye,
  EyeOff,
  CheckCircle2,
  AlertCircle,
  Info,
  Zap,
  Globe,
  PlayCircle,
  Brain,
} from "lucide-react";
import { toast } from "sonner";
import { api } from "@/shared/api/database";
import EmbeddingConfig from "@/components/agent/EmbeddingConfig";

// LLM Providers - 2025
const LLM_PROVIDERS = [
  { value: 'openai', label: 'OpenAI GPT', icon: '🟢', category: 'litellm', hint: 'gpt-5, gpt-5-mini, o3, etc.' },
  { value: 'claude', label: 'Anthropic Claude', icon: '🟣', category: 'litellm', hint: 'claude-sonnet-4.5, claude-opus-4, etc.' },
  { value: 'gemini', label: 'Google Gemini', icon: '🔵', category: 'litellm', hint: 'gemini-3-pro, gemini-3-flash, etc.' },
  { value: 'deepseek', label: 'DeepSeek', icon: '🔷', category: 'litellm', hint: 'deepseek-v3.1-terminus, deepseek-v3, etc.' },
  { value: 'qwen', label: 'Qwen (Tongyi Qianwen)', icon: '🟠', category: 'litellm', hint: 'qwen3-max-instruct, qwen3-plus, etc.' },
  { value: 'zhipu', label: 'Zhipu AI (GLM)', icon: '🔴', category: 'litellm', hint: 'glm-4.6, glm-4.5-flash, etc.' },
  { value: 'moonshot', label: 'Moonshot (Kimi)', icon: '🌙', category: 'litellm', hint: 'kimi-k2, kimi-k1.5, etc.' },
  { value: 'ollama', label: 'Ollama (local)', icon: '🖥️', category: 'litellm', hint: 'llama3.3-70b, qwen3-8b, etc.' },
  { value: 'baidu', label: 'Baidu ERNIE', icon: '📘', category: 'native', hint: 'ernie-4.5 (requires API_KEY:SECRET_KEY)' },
  { value: 'minimax', label: 'MiniMax', icon: '⚡', category: 'native', hint: 'minimax-m2, minimax-m1, etc.' },
  { value: 'doubao', label: 'ByteDance Doubao', icon: '🎯', category: 'native', hint: 'doubao-1.6-pro, doubao-1.5-pro, etc.' },
];

// Model used when the user leaves the model field blank.
const DEFAULT_MODELS: Record<string, string> = {
  openai: 'gpt-5',
  claude: 'claude-sonnet-4.5',
  gemini: 'gemini-3-pro',
  deepseek: 'deepseek-v3.1-terminus',
  qwen: 'qwen3-max-instruct',
  zhipu: 'glm-4.6',
  moonshot: 'kimi-k2',
  ollama: 'llama3.3-70b',
  baidu: 'ernie-4.5',
  minimax: 'minimax-m2',
  doubao: 'doubao-1.6-pro',
};

interface SystemConfigData {
  llmProvider: string;
  llmApiKey: string;
  llmModel: string;
  llmBaseUrl: string;
  llmTimeout: number;
  llmTemperature: number;
  llmMaxTokens: number;
  githubToken: string;
  gitlabToken: string;
  maxAnalyzeFiles: number;
  llmConcurrency: number;
  llmGapMs: number;
  outputLanguage: string;
}

// Fallback used when the backend returns no config or the request fails.
const DEFAULT_CONFIG: SystemConfigData = {
  llmProvider: 'openai',
  llmApiKey: '',
  llmModel: '',
  llmBaseUrl: '',
  llmTimeout: 150000,
  llmTemperature: 0.1,
  llmMaxTokens: 4096,
  githubToken: '',
  gitlabToken: '',
  maxAnalyzeFiles: 50,
  llmConcurrency: 3,
  llmGapMs: 2000,
  outputLanguage: 'zh-CN',
};

export function SystemConfig() {
  const [config, setConfig] = useState<SystemConfigData | null>(null);
  const [loading, setLoading] = useState(true);
  const [showApiKey, setShowApiKey] = useState(false);
  const [hasChanges, setHasChanges] = useState(false);
  const [testingLLM, setTestingLLM] = useState(false);
  const [llmTestResult, setLlmTestResult] = useState<{ success: boolean; message: string; model?: string } | null>(null);

  useEffect(() => {
    loadConfig();
  }, []);

  const loadConfig = async () => {
    try {
      setLoading(true);
      console.log('[SystemConfig] Loading config...');
      const backendConfig = await api.getUserConfig();
      console.log('[SystemConfig] Raw backend response:', JSON.stringify(backendConfig, null, 2));
      if (backendConfig) {
        const llmConfig = backendConfig.llmConfig || {};
        const otherConfig = backendConfig.otherConfig || {};
        const newConfig: SystemConfigData = {
          llmProvider: llmConfig.llmProvider || 'openai',
          llmApiKey: llmConfig.llmApiKey || '',
          llmModel: llmConfig.llmModel || '',
          llmBaseUrl: llmConfig.llmBaseUrl || '',
          llmTimeout: llmConfig.llmTimeout || 150000,
          // Temperature can legitimately be 0, so use ?? rather than ||.
          llmTemperature: llmConfig.llmTemperature ?? 0.1,
          llmMaxTokens: llmConfig.llmMaxTokens || 4096,
          githubToken: otherConfig.githubToken || '',
          gitlabToken: otherConfig.gitlabToken || '',
          maxAnalyzeFiles: otherConfig.maxAnalyzeFiles || 50,
          llmConcurrency: otherConfig.llmConcurrency || 3,
          llmGapMs: otherConfig.llmGapMs || 2000,
          outputLanguage: otherConfig.outputLanguage || 'zh-CN',
        };
        console.log('[SystemConfig] Parsed config:', newConfig);
        setConfig(newConfig);
        console.log('✓ Config loaded:', {
          provider: llmConfig.llmProvider,
          hasApiKey: !!llmConfig.llmApiKey,
          model: llmConfig.llmModel,
        });
      } else {
        console.warn('[SystemConfig] Backend returned no data, using defaults');
        setConfig(DEFAULT_CONFIG);
      }
    } catch (error) {
      console.error('Failed to load config:', error);
      setConfig(DEFAULT_CONFIG);
    } finally {
      setLoading(false);
    }
  };

  const saveConfig = async () => {
    if (!config) return;
    try {
      const savedConfig = await api.updateUserConfig({
        llmConfig: {
          llmProvider: config.llmProvider,
          llmApiKey: config.llmApiKey,
          llmModel: config.llmModel,
          llmBaseUrl: config.llmBaseUrl,
          llmTimeout: config.llmTimeout,
          llmTemperature: config.llmTemperature,
          llmMaxTokens: config.llmMaxTokens,
        },
        otherConfig: {
          githubToken: config.githubToken,
          gitlabToken: config.gitlabToken,
          maxAnalyzeFiles: config.maxAnalyzeFiles,
          llmConcurrency: config.llmConcurrency,
          llmGapMs: config.llmGapMs,
          outputLanguage: config.outputLanguage,
        },
      });
      // Re-sync local state with what the backend actually persisted.
      if (savedConfig) {
        const llmConfig = savedConfig.llmConfig || {};
        const otherConfig = savedConfig.otherConfig || {};
        setConfig({
          llmProvider: llmConfig.llmProvider || config.llmProvider,
          llmApiKey: llmConfig.llmApiKey || '',
          llmModel: llmConfig.llmModel || '',
          llmBaseUrl: llmConfig.llmBaseUrl || '',
          llmTimeout: llmConfig.llmTimeout || 150000,
          llmTemperature: llmConfig.llmTemperature ?? 0.1,
          llmMaxTokens: llmConfig.llmMaxTokens || 4096,
          githubToken: otherConfig.githubToken || '',
          gitlabToken: otherConfig.gitlabToken || '',
          maxAnalyzeFiles: otherConfig.maxAnalyzeFiles || 50,
          llmConcurrency: otherConfig.llmConcurrency || 3,
          llmGapMs: otherConfig.llmGapMs || 2000,
          outputLanguage: otherConfig.outputLanguage || 'zh-CN',
        });
      }
      setHasChanges(false);
      toast.success("Configuration saved!");
    } catch (error) {
      toast.error(`Save failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  };

  const resetConfig = async () => {
    if (!window.confirm("Reset to the default configuration?")) return;
    try {
      await api.deleteUserConfig();
      await loadConfig();
      setHasChanges(false);
      toast.success("Reset to defaults");
    } catch (error) {
      toast.error(`Reset failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  };

  const updateConfig = (key: keyof SystemConfigData, value: string | number) => {
    if (!config) return;
    setConfig(prev => (prev ? { ...prev, [key]: value } : null));
    setHasChanges(true);
  };

  const testLLMConnection = async () => {
    if (!config) return;
    // Ollama runs locally and needs no API key.
    if (!config.llmApiKey && config.llmProvider !== 'ollama') {
      toast.error('Please configure an API Key first');
      return;
    }
    setTestingLLM(true);
    setLlmTestResult(null);
    try {
      const result = await api.testLLMConnection({
        provider: config.llmProvider,
        apiKey: config.llmApiKey,
        model: config.llmModel || undefined,
        baseUrl: config.llmBaseUrl || undefined,
      });
      setLlmTestResult(result);
      if (result.success) toast.success(`Connected! Model: ${result.model}`);
      else toast.error(`Connection failed: ${result.message}`);
    } catch (error) {
      const msg = error instanceof Error ? error.message : 'Unknown error';
      setLlmTestResult({ success: false, message: msg });
      toast.error(`Test failed: ${msg}`);
    } finally {
      setTestingLLM(false);
    }
  };

  if (loading || !config) {
    return (
      <div className="flex items-center justify-center p-12 text-muted-foreground">
        Loading configuration...
      </div>
    );
  }

  const currentProvider = LLM_PROVIDERS.find(p => p.value === config.llmProvider);
  const isConfigured = config.llmApiKey !== '' || config.llmProvider === 'ollama';

  // NOTE: the original wrapper markup and most class names were lost; the JSX
  // below is a minimal reconstruction around the surviving text, inputs, and
  // handlers.
  return (
    <div className="space-y-6">
      {/* Status Bar */}
      <div className="flex items-center justify-between">
        {isConfigured ? (
          <span className="flex items-center gap-2 text-sm">
            <CheckCircle2 className="h-4 w-4" />
            LLM configured ({currentProvider?.label})
          </span>
        ) : (
          <span className="flex items-center gap-2 text-sm">
            <AlertCircle className="h-4 w-4" />
            Please configure an LLM API Key
          </span>
        )}
        {hasChanges && (
          <Button size="sm" onClick={saveConfig}>
            <Save className="h-4 w-4 mr-2" />
            Save
          </Button>
        )}
      </div>
      <Tabs defaultValue="llm">
        <TabsList>
          <TabsTrigger value="llm">
            <Zap className="h-4 w-4 mr-2" />
            LLM Config
          </TabsTrigger>
          <TabsTrigger value="embedding">
            <Brain className="h-4 w-4 mr-2" />
            Embedding Model
          </TabsTrigger>
          <TabsTrigger value="analysis">
            <Settings className="h-4 w-4 mr-2" />
            Analysis Parameters
          </TabsTrigger>
          <TabsTrigger value="git">
            <Globe className="h-4 w-4 mr-2" />
            Git Integration
          </TabsTrigger>
        </TabsList>

        {/* LLM Config */}
        <TabsContent value="llm" className="space-y-6">
          {/* Provider Selection */}
          <div className="space-y-2">
            <Label>Provider</Label>
            <Select
              value={config.llmProvider}
              onValueChange={(value) => updateConfig('llmProvider', value)}
            >
              <SelectTrigger className="h-12">
                <SelectValue />
              </SelectTrigger>
              <SelectContent>
                {LLM_PROVIDERS.map((p) => (
                  <SelectItem key={p.value} value={p.value}>
                    {p.icon} {p.label}
                  </SelectItem>
                ))}
              </SelectContent>
            </Select>
            {currentProvider && (
              <p className="text-xs text-muted-foreground">{currentProvider.hint}</p>
            )}
          </div>

          {/* API Key */}
          {config.llmProvider !== 'ollama' && (
            <div className="space-y-2">
              <Label>API Key</Label>
              <div className="relative">
                <Input
                  type={showApiKey ? 'text' : 'password'}
                  value={config.llmApiKey}
                  onChange={(e) => updateConfig('llmApiKey', e.target.value)}
                  placeholder={config.llmProvider === 'baidu' ? 'API_KEY:SECRET_KEY format' : 'Enter your API Key'}
                  className="h-12 cyber-input"
                />
                <Button
                  type="button"
                  variant="ghost"
                  size="icon"
                  className="absolute right-1 top-1"
                  onClick={() => setShowApiKey(!showApiKey)}
                >
                  {showApiKey ? <EyeOff className="h-4 w-4" /> : <Eye className="h-4 w-4" />}
                </Button>
              </div>
            </div>
          )}

          {/* Model and Base URL */}
          <div className="grid grid-cols-2 gap-4">
            <div className="space-y-2">
              <Label>Model</Label>
              <Input
                value={config.llmModel}
                onChange={(e) => updateConfig('llmModel', e.target.value)}
                placeholder={`Default: ${DEFAULT_MODELS[config.llmProvider] || 'auto'}`}
                className="h-10 cyber-input"
              />
            </div>
            <div className="space-y-2">
              <Label>Base URL</Label>
              <Input
                value={config.llmBaseUrl}
                onChange={(e) => updateConfig('llmBaseUrl', e.target.value)}
                placeholder="Leave blank for the official endpoint, or enter a relay URL"
                className="h-10 cyber-input"
              />
            </div>
          </div>
          {/* Test Connection */}
          <div className="flex items-center justify-between">
            <div>
              <p className="text-sm font-medium">Test connection</p>
              <p className="text-xs text-muted-foreground">Verify that the configuration is correct</p>
            </div>
            <Button onClick={testLLMConnection} disabled={testingLLM}>
              <PlayCircle className="h-4 w-4 mr-2" />
              {testingLLM ? 'Testing...' : 'Test'}
            </Button>
          </div>
          {llmTestResult && (
            <div className="flex items-center gap-2 text-sm">
              {llmTestResult.success ? (
                <CheckCircle2 className="h-4 w-4" />
              ) : (
                <AlertCircle className="h-4 w-4" />
              )}
              {llmTestResult.message}
            </div>
          )}

          {/* Advanced Parameters */}
          <div className="space-y-4">
            <p className="text-sm font-medium">Advanced parameters</p>
            <div className="grid grid-cols-3 gap-4">
              <div className="space-y-2">
                <Label>Timeout (ms)</Label>
                <Input
                  type="number"
                  value={config.llmTimeout}
                  onChange={(e) => updateConfig('llmTimeout', Number(e.target.value))}
                  className="h-10 cyber-input"
                />
              </div>
              <div className="space-y-2">
                <Label>Temperature</Label>
                <Input
                  type="number"
                  step="0.1"
                  value={config.llmTemperature}
                  onChange={(e) => updateConfig('llmTemperature', Number(e.target.value))}
                  className="h-10 cyber-input"
                />
              </div>
              <div className="space-y-2">
                <Label>Max Tokens</Label>
                <Input
                  type="number"
                  value={config.llmMaxTokens}
                  onChange={(e) => updateConfig('llmMaxTokens', Number(e.target.value))}
                  className="h-10 cyber-input"
                />
              </div>
            </div>
          </div>
          {/* Usage Notes */}
          <div className="space-y-2 text-xs text-muted-foreground">
            <p className="flex items-center gap-2 font-medium">
              <Info className="h-4 w-4" />
              Configuration notes
            </p>
            <p>
              LiteLLM unified adapter: most providers are handled uniformly through
              LiteLLM, with automatic retry and load balancing.
            </p>
            <p>
              Native adapters: Baidu, MiniMax, and Doubao use dedicated adapters
              because of their non-standard API formats.
            </p>
            <p>
              API relays: put the relay URL in Base URL and use the key issued by
              the relay as the API Key.
            </p>
          </div>
        </TabsContent>
        {/* Embedding Config */}
        <TabsContent value="embedding">
          <EmbeddingConfig />
        </TabsContent>

        {/* Analysis Parameters */}
        <TabsContent value="analysis" className="space-y-6">
          <div className="space-y-2">
            <Label>Max Analyze Files</Label>
            <Input
              type="number"
              value={config.maxAnalyzeFiles}
              onChange={(e) => updateConfig('maxAnalyzeFiles', Number(e.target.value))}
              className="h-10 cyber-input"
            />
            <p className="text-xs text-muted-foreground">
              Maximum number of files processed in a single task
            </p>
          </div>
          <div className="space-y-2">
            <Label>LLM Concurrency</Label>
            <Input
              type="number"
              value={config.llmConcurrency}
              onChange={(e) => updateConfig('llmConcurrency', Number(e.target.value))}
              className="h-10 cyber-input"
            />
            <p className="text-xs text-muted-foreground">
              Number of LLM requests sent concurrently
            </p>
          </div>
          <div className="space-y-2">
            <Label>Request Gap (ms)</Label>
            <Input
              type="number"
              value={config.llmGapMs}
              onChange={(e) => updateConfig('llmGapMs', Number(e.target.value))}
              className="h-10 cyber-input"
            />
            <p className="text-xs text-muted-foreground">
              Delay between consecutive requests
            </p>
          </div>
          <div className="space-y-2">
            <Label>Output Language</Label>
            {/* The original option list was lost; zh-CN is the stored default. */}
            <Select
              value={config.outputLanguage}
              onValueChange={(value) => updateConfig('outputLanguage', value)}
            >
              <SelectTrigger className="h-10">
                <SelectValue />
              </SelectTrigger>
              <SelectContent>
                <SelectItem value="zh-CN">中文(简体)</SelectItem>
                <SelectItem value="en-US">English</SelectItem>
              </SelectContent>
            </Select>
            <p className="text-xs text-muted-foreground">
              Output language for code-review results
            </p>
          </div>
        </TabsContent>
        {/* Git Integration */}
        <TabsContent value="git" className="space-y-6">
          <div className="space-y-2">
            <Label>GitHub Token</Label>
            <Input
              value={config.githubToken}
              onChange={(e) => updateConfig('githubToken', e.target.value)}
              placeholder="ghp_xxxxxxxxxxxx"
              className="h-10 cyber-input"
            />
            <p className="text-xs text-muted-foreground">
              Used to access private repositories. Get one at{' '}
              <a href="https://github.com/settings/tokens" target="_blank" rel="noreferrer" className="underline">
                github.com/settings/tokens
              </a>
            </p>
          </div>
          <div className="space-y-2">
            <Label>GitLab Token</Label>
            <Input
              value={config.gitlabToken}
              onChange={(e) => updateConfig('gitlabToken', e.target.value)}
              placeholder="glpat-xxxxxxxxxxxx"
              className="h-10 cyber-input"
            />
            <p className="text-xs text-muted-foreground">
              Used to access private repositories. Get one at{' '}
              <a href="https://gitlab.com/-/profile/personal_access_tokens" target="_blank" rel="noreferrer" className="underline">
                gitlab.com/-/profile/personal_access_tokens
              </a>
            </p>
          </div>
          <div className="space-y-1 text-xs text-muted-foreground">
            <p className="flex items-center gap-2 font-medium">
              <Info className="h-4 w-4" />
              Tips
            </p>
            <p>• Public repositories don't require a token</p>
            <p>• Private repositories require a token for the corresponding platform</p>
          </div>
        </TabsContent>
      </Tabs>
      {/* Floating Save Button */}
      {hasChanges && (
        <div className="fixed bottom-6 right-6 flex gap-2">
          <Button variant="outline" onClick={resetConfig}>
            <RotateCcw className="h-4 w-4 mr-2" />
            Reset
          </Button>
          <Button onClick={saveConfig}>
            <Save className="h-4 w-4 mr-2" />
            Save
          </Button>
        </div>
      )}
    </div>
  );
}
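
/*
 * Usage sketch. The host page and import path below are assumptions; the
 * real route/layout wiring is project-specific. One real constraint worth
 * noting: sonner's toast() calls only render if a <Toaster /> is mounted
 * somewhere in the component tree, so a host page would look roughly like:
 *
 *   import { Toaster } from "sonner";
 *   import { SystemConfig } from "@/components/SystemConfig"; // hypothetical path
 *
 *   export function SettingsPage() {
 *     return (
 *       <>
 *         <SystemConfig />
 *         <Toaster position="top-right" />
 *       </>
 *     );
 *   }
 */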