feat: Lock LLM and embedding configurations to system environment variables, mask API keys, and refactor frontend logout.

This commit is contained in:
vinland100 2026-01-05 14:45:00 +08:00
parent 0176cb4d12
commit 2b0c7f5c2a
8 changed files with 200 additions and 551 deletions

View File

@ -668,15 +668,11 @@ async def _get_user_config(db: AsyncSession, user_id: Optional[str]) -> Optional
)
config = result.scalar_one_or_none()
if config and config.llm_config:
user_llm_config = json.loads(config.llm_config) if config.llm_config else {}
if config:
user_other_config = json.loads(config.other_config) if config.other_config else {}
user_llm_config = decrypt_config(user_llm_config, SENSITIVE_LLM_FIELDS)
user_other_config = decrypt_config(user_other_config, SENSITIVE_OTHER_FIELDS)
return {
"llmConfig": user_llm_config,
"otherConfig": user_other_config,
}
except Exception as e:
@ -746,41 +742,19 @@ async def _initialize_tools(
try:
await emit(f"🔍 正在初始化 RAG 系统...")
# 从用户配置中获取 embedding 配置
user_llm_config = (user_config or {}).get('llmConfig', {})
user_other_config = (user_config or {}).get('otherConfig', {})
user_embedding_config = user_other_config.get('embedding_config', {})
# 锁定模式Embedding 配置始终来自 settings (.env)
embedding_provider = settings.EMBEDDING_PROVIDER
embedding_model = settings.EMBEDDING_MODEL
# Embedding Provider 优先级:用户嵌入配置 > 环境变量
embedding_provider = (
user_embedding_config.get('provider') or
getattr(settings, 'EMBEDDING_PROVIDER', 'openai')
)
# Embedding Model 优先级:用户嵌入配置 > 环境变量
embedding_model = (
user_embedding_config.get('model') or
getattr(settings, 'EMBEDDING_MODEL', 'text-embedding-3-small')
)
# API Key 优先级:用户嵌入配置 > 环境变量 EMBEDDING_API_KEY > 用户 LLM 配置 > 环境变量 LLM_API_KEY
# 注意API Key 可以共享,因为很多用户使用同一个 OpenAI Key 做 LLM 和 Embedding
# API Key 优先级EMBEDDING_API_KEY > LLM_API_KEY
embedding_api_key = (
user_embedding_config.get('api_key') or
getattr(settings, 'EMBEDDING_API_KEY', None) or
user_llm_config.get('llmApiKey') or
getattr(settings, 'LLM_API_KEY', '') or
''
getattr(settings, "EMBEDDING_API_KEY", None) or
settings.LLM_API_KEY or
""
)
# Base URL 优先级:用户嵌入配置 > 环境变量 EMBEDDING_BASE_URL > None使用提供商默认地址
# 🔥 重要Base URL 不应该回退到 LLM 的 base_url因为 Embedding 和 LLM 可能使用完全不同的服务
# 例如LLM 使用 SiliconFlow但 Embedding 使用 HuggingFace
embedding_base_url = (
user_embedding_config.get('base_url') or
getattr(settings, 'EMBEDDING_BASE_URL', None) or
None
)
# Base URL 优先级EMBEDDING_BASE_URL > None
embedding_base_url = getattr(settings, "EMBEDDING_BASE_URL", None)
logger.info(f"RAG 配置: provider={embedding_provider}, model={embedding_model}, base_url={embedding_base_url or '(使用默认)'}")
await emit(f"📊 Embedding 配置: {embedding_provider}/{embedding_model}")

View File

@ -18,7 +18,7 @@ from app.core.encryption import encrypt_sensitive_data, decrypt_sensitive_data
router = APIRouter()
# 需要加密的敏感字段列表
# 需要加密的敏感字段列表 (LLM 已锁定至 .env此处仅保留其他配置)
SENSITIVE_LLM_FIELDS = [
'llmApiKey', 'geminiApiKey', 'openaiApiKey', 'claudeApiKey',
'qwenApiKey', 'deepseekApiKey', 'zhipuApiKey', 'moonshotApiKey',
@ -27,6 +27,15 @@ SENSITIVE_LLM_FIELDS = [
SENSITIVE_OTHER_FIELDS = ['githubToken', 'gitlabToken']
def mask_api_key(key: Optional[str]) -> str:
    """Return a display-safe version of an API key.

    Shows only the first 3 and last 4 characters; keys of 8 characters or
    fewer collapse to "***" so masking never reveals most of the key, and
    an empty/None key maps to "".
    """
    if key:
        return f"{key[:3]}***{key[-4:]}" if len(key) > 8 else "***"
    return ""
def encrypt_config(config: dict, sensitive_fields: list) -> dict:
"""加密配置中的敏感字段"""
encrypted = config.copy()
@ -104,23 +113,23 @@ def get_default_config() -> dict:
return {
"llmConfig": {
"llmProvider": settings.LLM_PROVIDER,
"llmApiKey": "",
"llmApiKey": mask_api_key(settings.LLM_API_KEY),
"llmModel": settings.LLM_MODEL or "",
"llmBaseUrl": settings.LLM_BASE_URL or "",
"llmTimeout": settings.LLM_TIMEOUT * 1000, # 转换为毫秒
"llmTemperature": settings.LLM_TEMPERATURE,
"llmMaxTokens": settings.LLM_MAX_TOKENS,
"llmCustomHeaders": "",
"geminiApiKey": settings.GEMINI_API_KEY or "",
"openaiApiKey": settings.OPENAI_API_KEY or "",
"claudeApiKey": settings.CLAUDE_API_KEY or "",
"qwenApiKey": settings.QWEN_API_KEY or "",
"deepseekApiKey": settings.DEEPSEEK_API_KEY or "",
"zhipuApiKey": settings.ZHIPU_API_KEY or "",
"moonshotApiKey": settings.MOONSHOT_API_KEY or "",
"baiduApiKey": settings.BAIDU_API_KEY or "",
"minimaxApiKey": settings.MINIMAX_API_KEY or "",
"doubaoApiKey": settings.DOUBAO_API_KEY or "",
"geminiApiKey": mask_api_key(settings.GEMINI_API_KEY),
"openaiApiKey": mask_api_key(settings.OPENAI_API_KEY),
"claudeApiKey": mask_api_key(settings.CLAUDE_API_KEY),
"qwenApiKey": mask_api_key(settings.QWEN_API_KEY),
"deepseekApiKey": mask_api_key(settings.DEEPSEEK_API_KEY),
"zhipuApiKey": mask_api_key(settings.ZHIPU_API_KEY),
"moonshotApiKey": mask_api_key(settings.MOONSHOT_API_KEY),
"baiduApiKey": mask_api_key(settings.BAIDU_API_KEY),
"minimaxApiKey": mask_api_key(settings.MINIMAX_API_KEY),
"doubaoApiKey": mask_api_key(settings.DOUBAO_API_KEY),
"ollamaBaseUrl": settings.OLLAMA_BASE_URL or "http://localhost:11434/v1",
},
"otherConfig": {
@ -178,7 +187,8 @@ async def get_my_config(
print(f" - llmApiKey: {'***' + user_llm_config.get('llmApiKey', '')[-4:] if user_llm_config.get('llmApiKey') else '(空)'}")
print(f" - llmModel: {user_llm_config.get('llmModel')}")
merged_llm_config = {**default_config["llmConfig"], **user_llm_config}
# LLM配置始终来自系统默认.env不再允许用户覆盖
merged_llm_config = default_config["llmConfig"]
merged_other_config = {**default_config["otherConfig"], **user_other_config}
return UserConfigResponse(
@ -247,7 +257,8 @@ async def update_my_config(
user_llm_config = decrypt_config(user_llm_config, SENSITIVE_LLM_FIELDS)
user_other_config = decrypt_config(user_other_config, SENSITIVE_OTHER_FIELDS)
merged_llm_config = {**default_config["llmConfig"], **user_llm_config}
# LLM配置始终来自系统默认.env不再允许用户覆盖
merged_llm_config = default_config["llmConfig"]
merged_other_config = {**default_config["otherConfig"], **user_other_config}
return UserConfigResponse(
@ -299,230 +310,52 @@ class LLMTestResponse(BaseModel):
@router.post("/test-llm", response_model=LLMTestResponse)
async def test_llm_connection(
request: LLMTestRequest,
db: AsyncSession = Depends(get_db),
current_user: User = Depends(deps.get_current_user),
) -> Any:
"""测试LLM连接是否正常"""
from app.services.llm.factory import LLMFactory, NATIVE_ONLY_PROVIDERS
from app.services.llm.adapters import LiteLLMAdapter, BaiduAdapter, MinimaxAdapter, DoubaoAdapter
from app.services.llm.types import LLMConfig, LLMProvider, LLMRequest, LLMMessage, DEFAULT_MODELS, DEFAULT_BASE_URLS
import traceback
"""测试当前系统 LLM 配置是否正常"""
from app.services.llm.service import LLMService
import time
import traceback
start_time = time.time()
# 获取用户保存的配置
result = await db.execute(
select(UserConfig).where(UserConfig.user_id == current_user.id)
)
user_config_record = result.scalar_one_or_none()
# 解析用户配置
saved_llm_config = {}
saved_other_config = {}
if user_config_record:
if user_config_record.llm_config:
saved_llm_config = decrypt_config(
json.loads(user_config_record.llm_config),
SENSITIVE_LLM_FIELDS
)
if user_config_record.other_config:
saved_other_config = decrypt_config(
json.loads(user_config_record.other_config),
SENSITIVE_OTHER_FIELDS
)
# 从保存的配置中获取参数(用于调试显示)
saved_timeout_ms = saved_llm_config.get('llmTimeout', settings.LLM_TIMEOUT * 1000)
saved_temperature = saved_llm_config.get('llmTemperature', settings.LLM_TEMPERATURE)
saved_max_tokens = saved_llm_config.get('llmMaxTokens', settings.LLM_MAX_TOKENS)
saved_concurrency = saved_other_config.get('llmConcurrency', settings.LLM_CONCURRENCY)
saved_gap_ms = saved_other_config.get('llmGapMs', settings.LLM_GAP_MS)
saved_max_files = saved_other_config.get('maxAnalyzeFiles', settings.MAX_ANALYZE_FILES)
saved_output_lang = saved_other_config.get('outputLanguage', settings.OUTPUT_LANGUAGE)
debug_info = {
"provider": request.provider,
"model_requested": request.model,
"base_url_requested": request.baseUrl,
"api_key_length": len(request.apiKey) if request.apiKey else 0,
"api_key_prefix": request.apiKey[:8] + "..." if request.apiKey and len(request.apiKey) > 8 else "(empty)",
# 用户保存的配置参数
"saved_config": {
"timeout_ms": saved_timeout_ms,
"temperature": saved_temperature,
"max_tokens": saved_max_tokens,
"concurrency": saved_concurrency,
"gap_ms": saved_gap_ms,
"max_analyze_files": saved_max_files,
"output_language": saved_output_lang,
},
}
try:
# 解析provider
provider_map = {
'gemini': LLMProvider.GEMINI,
'openai': LLMProvider.OPENAI,
'claude': LLMProvider.CLAUDE,
'qwen': LLMProvider.QWEN,
'deepseek': LLMProvider.DEEPSEEK,
'zhipu': LLMProvider.ZHIPU,
'moonshot': LLMProvider.MOONSHOT,
'baidu': LLMProvider.BAIDU,
'minimax': LLMProvider.MINIMAX,
'doubao': LLMProvider.DOUBAO,
'ollama': LLMProvider.OLLAMA,
}
# LLMService 已经重构为锁定读取 .env 配置
llm_service = LLMService()
provider = provider_map.get(request.provider.lower())
if not provider:
debug_info["error_type"] = "unsupported_provider"
return LLMTestResponse(
success=False,
message=f"不支持的LLM提供商: {request.provider}",
debug=debug_info
)
# 记录测试信息
print(f"🔍 测试 LLM 连接: Provider={llm_service.config.provider}, Model={llm_service.config.model}")
# 获取默认模型
model = request.model or DEFAULT_MODELS.get(provider)
base_url = request.baseUrl or DEFAULT_BASE_URLS.get(provider, "")
# 简单测试:获取分析结果
test_code = "print('hello world')"
result = await llm_service.analyze_code(test_code, "python")
# 测试时使用用户保存的所有配置参数
test_timeout = int(saved_timeout_ms / 1000) if saved_timeout_ms else settings.LLM_TIMEOUT
test_temperature = saved_temperature if saved_temperature is not None else settings.LLM_TEMPERATURE
test_max_tokens = saved_max_tokens if saved_max_tokens else settings.LLM_MAX_TOKENS
debug_info["model_used"] = model
debug_info["base_url_used"] = base_url
debug_info["is_native_adapter"] = provider in NATIVE_ONLY_PROVIDERS
debug_info["test_params"] = {
"timeout": test_timeout,
"temperature": test_temperature,
"max_tokens": test_max_tokens,
}
print(f"[LLM Test] 开始测试: provider={provider.value}, model={model}, base_url={base_url}, temperature={test_temperature}, timeout={test_timeout}s, max_tokens={test_max_tokens}")
# 创建配置
config = LLMConfig(
provider=provider,
api_key=request.apiKey,
model=model,
base_url=request.baseUrl,
timeout=test_timeout,
temperature=test_temperature,
max_tokens=test_max_tokens,
)
# 直接创建新的适配器实例(不使用缓存),确保使用最新的配置
if provider in NATIVE_ONLY_PROVIDERS:
native_adapter_map = {
LLMProvider.BAIDU: BaiduAdapter,
LLMProvider.MINIMAX: MinimaxAdapter,
LLMProvider.DOUBAO: DoubaoAdapter,
}
adapter = native_adapter_map[provider](config)
debug_info["adapter_type"] = type(adapter).__name__
else:
adapter = LiteLLMAdapter(config)
debug_info["adapter_type"] = "LiteLLMAdapter"
# 获取 LiteLLM 实际使用的模型名
debug_info["litellm_model"] = getattr(adapter, '_get_litellm_model', lambda: model)() if hasattr(adapter, '_get_litellm_model') else model
test_request = LLMRequest(
messages=[
LLMMessage(role="user", content="Say 'Hello' in one word.")
],
temperature=test_temperature,
max_tokens=test_max_tokens,
)
print(f"[LLM Test] 发送测试请求...")
response = await adapter.complete(test_request)
elapsed_time = time.time() - start_time
debug_info["elapsed_time_ms"] = round(elapsed_time * 1000, 2)
# 验证响应内容
if not response or not response.content:
debug_info["error_type"] = "empty_response"
debug_info["raw_response"] = str(response) if response else None
print(f"[LLM Test] 空响应: {response}")
return LLMTestResponse(
success=False,
message="LLM 返回空响应,请检查 API Key 和配置",
debug=debug_info
)
debug_info["response_length"] = len(response.content)
debug_info["usage"] = {
"prompt_tokens": getattr(response, 'prompt_tokens', None),
"completion_tokens": getattr(response, 'completion_tokens', None),
"total_tokens": getattr(response, 'total_tokens', None),
}
print(f"[LLM Test] 成功! 响应: {response.content[:50]}... 耗时: {elapsed_time:.2f}s")
duration = round(time.time() - start_time, 2)
return LLMTestResponse(
success=True,
message=f"连接成功 ({elapsed_time:.2f}s)",
model=model,
response=response.content[:100] if response.content else None,
debug=debug_info
message=f"连接成功!耗时: {duration}s",
model=llm_service.config.model,
response="分析测试完成",
debug={
"provider": llm_service.config.provider.value,
"model": llm_service.config.model,
"duration_s": duration,
"issues_found": len(result.get("issues", []))
}
)
except Exception as e:
elapsed_time = time.time() - start_time
duration = round(time.time() - start_time, 2)
error_msg = str(e)
error_type = type(e).__name__
debug_info["elapsed_time_ms"] = round(elapsed_time * 1000, 2)
debug_info["error_type"] = error_type
debug_info["error_message"] = error_msg
debug_info["traceback"] = traceback.format_exc()
# 提取 LLMError 中的 api_response
if hasattr(e, 'api_response') and e.api_response:
debug_info["api_response"] = e.api_response
if hasattr(e, 'status_code') and e.status_code:
debug_info["status_code"] = e.status_code
print(f"[LLM Test] 失败: {error_type}: {error_msg}")
print(f"[LLM Test] Traceback:\n{traceback.format_exc()}")
# 提供更友好的错误信息
friendly_message = error_msg
# 优先检查余额不足(因为某些 API 用 429 表示余额不足)
if any(keyword in error_msg for keyword in ["余额不足", "资源包", "充值", "quota", "insufficient", "balance", "402"]):
friendly_message = "账户余额不足或配额已用尽,请充值后重试"
debug_info["error_category"] = "insufficient_balance"
elif "401" in error_msg or "invalid_api_key" in error_msg.lower() or "incorrect api key" in error_msg.lower():
friendly_message = "API Key 无效或已过期,请检查后重试"
debug_info["error_category"] = "auth_invalid_key"
elif "authentication" in error_msg.lower():
friendly_message = "认证失败,请检查 API Key 是否正确"
debug_info["error_category"] = "auth_failed"
elif "timeout" in error_msg.lower():
friendly_message = "连接超时,请检查网络或 API 地址是否正确"
debug_info["error_category"] = "timeout"
elif "connection" in error_msg.lower() or "connect" in error_msg.lower():
friendly_message = "无法连接到 API 服务,请检查网络或 API 地址"
debug_info["error_category"] = "connection"
elif "rate" in error_msg.lower() and "limit" in error_msg.lower():
friendly_message = "API 请求频率超限,请稍后重试"
debug_info["error_category"] = "rate_limit"
elif "model" in error_msg.lower() and ("not found" in error_msg.lower() or "does not exist" in error_msg.lower()):
friendly_message = f"模型 '{debug_info.get('model_used', 'unknown')}' 不存在或无权访问"
debug_info["error_category"] = "model_not_found"
else:
debug_info["error_category"] = "unknown"
print(f"❌ LLM 测试失败: {error_msg}")
return LLMTestResponse(
success=False,
message=friendly_message,
debug=debug_info
message=f"连接失败: {error_msg}",
debug={
"error": error_msg,
"traceback": traceback.format_exc(),
"duration_s": duration
}
)

View File

@ -172,83 +172,29 @@ EMBEDDING_PROVIDERS: List[EmbeddingProvider] = [
EMBEDDING_CONFIG_KEY = "embedding_config"
def mask_api_key(key: Optional[str]) -> str:
    """Partially mask an API key for display.

    Keeps the first 3 and last 4 characters visible. A missing/empty key
    yields "", and short keys (<= 8 chars) are fully hidden as "***".
    """
    if not key:
        return ""
    if len(key) <= 8:
        return "***"
    head, tail = key[:3], key[-4:]
    return f"{head}***{tail}"
async def get_embedding_config_from_db(db: AsyncSession, user_id: str) -> EmbeddingConfig:
"""从数据库获取嵌入配置(异步)"""
result = await db.execute(
select(UserConfig).where(UserConfig.user_id == user_id)
)
user_config = result.scalar_one_or_none()
if user_config and user_config.other_config:
try:
other_config = json.loads(user_config.other_config) if isinstance(user_config.other_config, str) else user_config.other_config
embedding_data = other_config.get(EMBEDDING_CONFIG_KEY)
if embedding_data:
config = EmbeddingConfig(
provider=embedding_data.get("provider", settings.EMBEDDING_PROVIDER),
model=embedding_data.get("model", settings.EMBEDDING_MODEL),
api_key=embedding_data.get("api_key"),
base_url=embedding_data.get("base_url"),
dimensions=embedding_data.get("dimensions"),
batch_size=embedding_data.get("batch_size", 100),
)
print(f"[EmbeddingConfig] 读取用户 {user_id} 的嵌入配置: provider={config.provider}, model={config.model}")
return config
except (json.JSONDecodeError, AttributeError) as e:
print(f"[EmbeddingConfig] 解析用户 {user_id} 配置失败: {e}")
# 返回默认配置
print(f"[EmbeddingConfig] 用户 {user_id} 无保存配置,返回默认值")
# 嵌入配置始终来自系统默认(.env不再允许用户覆盖
print(f"[EmbeddingConfig] 返回系统默认嵌入配置(来自 .env")
return EmbeddingConfig(
provider=settings.EMBEDDING_PROVIDER,
model=settings.EMBEDDING_MODEL,
api_key=settings.LLM_API_KEY,
base_url=settings.LLM_BASE_URL,
api_key=mask_api_key(settings.EMBEDDING_API_KEY or settings.LLM_API_KEY),
base_url=settings.EMBEDDING_BASE_URL or settings.LLM_BASE_URL,
dimensions=settings.EMBEDDING_DIMENSION if settings.EMBEDDING_DIMENSION > 0 else None,
batch_size=100,
)
async def save_embedding_config_to_db(db: AsyncSession, user_id: str, config: EmbeddingConfig) -> None:
"""保存嵌入配置到数据库(异步)"""
result = await db.execute(
select(UserConfig).where(UserConfig.user_id == user_id)
)
user_config = result.scalar_one_or_none()
# 准备嵌入配置数据
embedding_data = {
"provider": config.provider,
"model": config.model,
"api_key": config.api_key,
"base_url": config.base_url,
"dimensions": config.dimensions,
"batch_size": config.batch_size,
}
if user_config:
# 更新现有配置
try:
other_config = json.loads(user_config.other_config) if user_config.other_config else {}
except (json.JSONDecodeError, TypeError):
other_config = {}
other_config[EMBEDDING_CONFIG_KEY] = embedding_data
user_config.other_config = json.dumps(other_config)
# 🔥 显式标记 other_config 字段已修改,确保 SQLAlchemy 检测到变化
flag_modified(user_config, "other_config")
else:
# 创建新配置
user_config = UserConfig(
id=str(uuid.uuid4()),
user_id=user_id,
llm_config="{}",
other_config=json.dumps({EMBEDDING_CONFIG_KEY: embedding_data}),
)
db.add(user_config)
await db.commit()
print(f"[EmbeddingConfig] 已保存用户 {user_id} 的嵌入配置: provider={config.provider}, model={config.model}")
# ============ API Endpoints ============
@ -274,6 +220,8 @@ async def get_current_config(
config = await get_embedding_config_from_db(db, current_user.id)
# 获取维度
dimensions = config.dimensions
if not dimensions or dimensions <= 0:
dimensions = _get_model_dimensions(config.provider, config.model)
return EmbeddingConfigResponse(
@ -288,30 +236,12 @@ async def get_current_config(
@router.put("/config")
async def update_config(
config: EmbeddingConfig,
db: AsyncSession = Depends(deps.get_db),
current_user: User = Depends(deps.get_current_user),
) -> Any:
"""
更新嵌入模型配置持久化到数据库
更新嵌入模型配置已禁用固定从 .env 读取
"""
# 验证提供商
provider_ids = [p.id for p in EMBEDDING_PROVIDERS]
if config.provider not in provider_ids:
raise HTTPException(status_code=400, detail=f"不支持的提供商: {config.provider}")
# 获取提供商信息(用于检查 API Key 要求)
provider = next((p for p in EMBEDDING_PROVIDERS if p.id == config.provider), None)
# 注意:不再强制验证模型名称,允许用户输入自定义模型
# 检查 API Key
if provider and provider.requires_api_key and not config.api_key:
raise HTTPException(status_code=400, detail=f"{config.provider} 需要 API Key")
# 保存到数据库
await save_embedding_config_to_db(db, current_user.id, config)
return {"message": "配置已保存", "provider": config.provider, "model": config.model}
return {"message": "嵌入模型配置已锁定,请在 .env 文件中进行修改", "provider": settings.EMBEDDING_PROVIDER, "model": settings.EMBEDDING_MODEL}
@router.post("/test", response_model=TestEmbeddingResponse)
@ -319,22 +249,22 @@ async def test_embedding(
request: TestEmbeddingRequest,
current_user: User = Depends(deps.get_current_user),
) -> Any:
"""
测试嵌入模型配置
"""
"""测试当前系统的嵌入模型配置"""
import time
from app.services.rag.embeddings import EmbeddingService
try:
start_time = time.time()
# 创建临时嵌入服务
from app.services.rag.embeddings import EmbeddingService
# 始终使用系统的真实配置进行测试
# API Key 优先级EMBEDDING_API_KEY > LLM_API_KEY
api_key = getattr(settings, "EMBEDDING_API_KEY", None) or settings.LLM_API_KEY
service = EmbeddingService(
provider=request.provider,
model=request.model,
api_key=request.api_key,
base_url=request.base_url,
provider=settings.EMBEDDING_PROVIDER,
model=settings.EMBEDDING_MODEL,
api_key=api_key,
base_url=settings.EMBEDDING_BASE_URL,
cache_enabled=False,
)
@ -345,13 +275,14 @@ async def test_embedding(
return TestEmbeddingResponse(
success=True,
message=f"嵌入成功! 维度: {len(embedding)}",
message=f"嵌入成功! 提供商: {settings.EMBEDDING_PROVIDER}, 维度: {len(embedding)}",
dimensions=len(embedding),
sample_embedding=embedding[:5], # 返回前 5 维
latency_ms=latency_ms,
)
except Exception as e:
print(f"❌ 嵌入测试失败: {str(e)}")
return TestEmbeddingResponse(
success=False,
message=f"嵌入失败: {str(e)}",

View File

@ -393,12 +393,6 @@ async def get_user_config_dict(db: AsyncSession, user_id: str) -> dict:
"""获取用户配置字典(包含解密敏感字段)"""
from app.core.encryption import decrypt_sensitive_data
# 需要解密的敏感字段列表(与 config.py 保持一致)
SENSITIVE_LLM_FIELDS = [
'llmApiKey', 'geminiApiKey', 'openaiApiKey', 'claudeApiKey',
'qwenApiKey', 'deepseekApiKey', 'zhipuApiKey', 'moonshotApiKey',
'baiduApiKey', 'minimaxApiKey', 'doubaoApiKey'
]
SENSITIVE_OTHER_FIELDS = ['githubToken', 'gitlabToken']
def decrypt_config(config: dict, sensitive_fields: list) -> dict:
@ -416,16 +410,13 @@ async def get_user_config_dict(db: AsyncSession, user_id: str) -> dict:
if not config:
return {}
# 解析配置
llm_config = json.loads(config.llm_config) if config.llm_config else {}
# 解析配置 (忽略 llm_config)
other_config = json.loads(config.other_config) if config.other_config else {}
# 解密敏感字段
llm_config = decrypt_config(llm_config, SENSITIVE_LLM_FIELDS)
other_config = decrypt_config(other_config, SENSITIVE_OTHER_FIELDS)
return {
'llmConfig': llm_config,
'otherConfig': other_config,
}

View File

@ -39,55 +39,31 @@ class LLMService:
"""
获取LLM配置
🔥 优先级从高到低
1. 数据库用户配置系统配置页面保存的配置
2. 环境变量配置.env 文件中的配置
如果用户配置中某个字段为空则自动回退到环境变量
🔥 锁定模式始终从环境变量.env读取
不再合并数据库中的用户配置确保系统一致性和安全性
"""
if self._config is None:
user_llm_config = self._user_config.get('llmConfig', {})
# 🔥 Provider 优先级:用户配置 > 环境变量
provider_str = user_llm_config.get('llmProvider') or getattr(settings, 'LLM_PROVIDER', 'openai')
# 锁定:全部来自 settings
provider_str = settings.LLM_PROVIDER
provider = self._parse_provider(provider_str)
# 🔥 API Key 优先级:用户配置 > 环境变量通用配置 > 环境变量平台专属配置
api_key = (
user_llm_config.get('llmApiKey') or
getattr(settings, 'LLM_API_KEY', '') or
self._get_provider_api_key_from_user_config(provider, user_llm_config) or
self._get_provider_api_key(provider)
)
# API Key 优先级:平台专属配置 > 通用 LLM_API_KEY
api_key = self._get_provider_api_key(provider) or settings.LLM_API_KEY
# 🔥 Base URL 优先级:用户配置 > 环境变量
base_url = (
user_llm_config.get('llmBaseUrl') or
getattr(settings, 'LLM_BASE_URL', None) or
self._get_provider_base_url(provider)
)
# Base URL 优先级:通用 LLM_BASE_URL > 平台默认
base_url = settings.LLM_BASE_URL or self._get_provider_base_url(provider)
# 🔥 Model 优先级:用户配置 > 环境变量 > 默认模型
model = (
user_llm_config.get('llmModel') or
getattr(settings, 'LLM_MODEL', '') or
DEFAULT_MODELS.get(provider, 'gpt-4o-mini')
)
# Model
model = settings.LLM_MODEL or DEFAULT_MODELS.get(provider, 'gpt-4o-mini')
# 🔥 Timeout 优先级:用户配置(毫秒) > 环境变量(秒)
timeout_ms = user_llm_config.get('llmTimeout')
if timeout_ms:
# 用户配置是毫秒,转换为秒
timeout = int(timeout_ms / 1000) if timeout_ms > 1000 else int(timeout_ms)
else:
# 环境变量是秒
timeout = int(getattr(settings, 'LLM_TIMEOUT', 150))
# Timeout (settings 中是秒)
timeout = int(settings.LLM_TIMEOUT)
# 🔥 Temperature 优先级:用户配置 > 环境变量
temperature = user_llm_config.get('llmTemperature') if user_llm_config.get('llmTemperature') is not None else float(getattr(settings, 'LLM_TEMPERATURE', 0.1))
# Temperature
temperature = float(settings.LLM_TEMPERATURE)
# 🔥 Max Tokens 优先级:用户配置 > 环境变量
max_tokens = user_llm_config.get('llmMaxTokens') or int(getattr(settings, 'LLM_MAX_TOKENS', 4096))
# Max Tokens
max_tokens = int(settings.LLM_MAX_TOKENS)
self._config = LLMConfig(
provider=provider,
@ -100,27 +76,8 @@ class LLMService:
)
return self._config
def _get_provider_api_key_from_user_config(self, provider: LLMProvider, user_llm_config: Dict[str, Any]) -> Optional[str]:
"""从用户配置中获取平台专属API Key"""
provider_key_map = {
LLMProvider.OPENAI: 'openaiApiKey',
LLMProvider.GEMINI: 'geminiApiKey',
LLMProvider.CLAUDE: 'claudeApiKey',
LLMProvider.QWEN: 'qwenApiKey',
LLMProvider.DEEPSEEK: 'deepseekApiKey',
LLMProvider.ZHIPU: 'zhipuApiKey',
LLMProvider.MOONSHOT: 'moonshotApiKey',
LLMProvider.BAIDU: 'baiduApiKey',
LLMProvider.MINIMAX: 'minimaxApiKey',
LLMProvider.DOUBAO: 'doubaoApiKey',
}
key_name = provider_key_map.get(provider)
if key_name:
return user_llm_config.get(key_name)
return None
def _get_provider_api_key(self, provider: LLMProvider) -> str:
"""根据提供商获取API Key"""
"""根据提供商从 settings 获取专属 API Key"""
provider_key_map = {
LLMProvider.OPENAI: 'OPENAI_API_KEY',
LLMProvider.GEMINI: 'GEMINI_API_KEY',
@ -132,12 +89,12 @@ class LLMService:
LLMProvider.BAIDU: 'BAIDU_API_KEY',
LLMProvider.MINIMAX: 'MINIMAX_API_KEY',
LLMProvider.DOUBAO: 'DOUBAO_API_KEY',
LLMProvider.OLLAMA: None, # Ollama 不需要 API Key
}
key_name = provider_key_map.get(provider)
if key_name:
return getattr(settings, key_name, '') or ''
return 'ollama' # Ollama的默认值
return ''
def _get_provider_base_url(self, provider: LLMProvider) -> Optional[str]:
"""根据提供商获取Base URL"""

View File

@ -208,7 +208,7 @@ export default function EmbeddingConfigPanel() {
<div className="bg-muted p-3 rounded-lg border border-border">
<p className="text-xs text-muted-foreground uppercase mb-1"></p>
<Badge className="bg-primary/20 text-primary border-primary/50 font-mono">
{currentConfig.provider}
{providers.find(p => p.id === currentConfig.provider)?.name || currentConfig.provider}
</Badge>
</div>
<div className="bg-muted p-3 rounded-lg border border-border">
@ -231,8 +231,11 @@ export default function EmbeddingConfigPanel() {
<div className="cyber-card p-6 space-y-6">
{/* 提供商选择 */}
<div className="space-y-2">
<Label className="text-xs font-bold text-muted-foreground uppercase"></Label>
<Select value={selectedProvider} onValueChange={handleProviderChange}>
<Label className="text-xs font-bold text-muted-foreground uppercase flex items-center justify-between">
<span></span>
<span className="text-[10px] text-amber-500/80 normal-case font-normal border border-amber-500/30 px-1 rounded">.env ()</span>
</Label>
<Select value={selectedProvider} onValueChange={handleProviderChange} disabled>
<SelectTrigger className="h-12 cyber-input">
<SelectValue placeholder="选择提供商" />
</SelectTrigger>
@ -270,6 +273,7 @@ export default function EmbeddingConfigPanel() {
onChange={(e) => setSelectedModel(e.target.value)}
placeholder="输入模型名称"
className="h-10 cyber-input"
disabled
/>
{selectedProviderInfo.models.length > 0 && (
<div className="flex flex-wrap gap-2 mt-2">
@ -279,8 +283,7 @@ export default function EmbeddingConfigPanel() {
key={model}
type="button"
onClick={() => setSelectedModel(model)}
className={`px-2 py-1 text-xs font-mono rounded border transition-colors ${
selectedModel === model
className={`px-2 py-1 text-xs font-mono rounded border transition-colors ${selectedModel === model
? "bg-primary/20 border-primary/50 text-primary"
: "bg-muted border-border text-muted-foreground hover:border-border hover:text-foreground"
}`}
@ -306,6 +309,7 @@ export default function EmbeddingConfigPanel() {
onChange={(e) => setApiKey(e.target.value)}
placeholder="输入 API Key"
className="h-10 cyber-input"
disabled
/>
<p className="text-xs text-muted-foreground">
API Key
@ -334,6 +338,7 @@ export default function EmbeddingConfigPanel() {
: "https://api.openai.com/v1"
}
className="h-10 cyber-input"
disabled
/>
<p className="text-xs text-muted-foreground">
API
@ -350,6 +355,7 @@ export default function EmbeddingConfigPanel() {
min={1}
max={500}
className="h-10 cyber-input w-32"
disabled
/>
<p className="text-xs text-muted-foreground">
50-100
@ -359,8 +365,7 @@ export default function EmbeddingConfigPanel() {
{/* 测试结果 */}
{testResult && (
<div
className={`p-4 rounded-lg ${
testResult.success
className={`p-4 rounded-lg ${testResult.success
? "bg-emerald-500/10 border border-emerald-500/30"
: "bg-rose-500/10 border border-rose-500/30"
}`}
@ -372,8 +377,7 @@ export default function EmbeddingConfigPanel() {
<AlertCircle className="w-5 h-5 text-rose-400" />
)}
<span
className={`font-bold ${
testResult.success ? "text-emerald-400" : "text-rose-400"
className={`font-bold ${testResult.success ? "text-emerald-400" : "text-rose-400"
}`}
>
{testResult.success ? "测试成功" : "测试失败"}
@ -398,37 +402,26 @@ export default function EmbeddingConfigPanel() {
<div className="flex items-center gap-3 pt-4 border-t border-border border-dashed">
<Button
onClick={handleTest}
disabled={testing || !selectedProvider || !selectedModel}
disabled={testing}
variant="outline"
className="cyber-btn-outline h-10"
className="h-10 border-orange-500/30 hover:bg-orange-500/10 text-orange-400"
>
{testing ? (
<Loader2 className="w-4 h-4 mr-2 animate-spin" />
) : (
<PlayCircle className="w-4 h-4 mr-2" />
<Zap className="w-4 h-4 mr-2" />
)}
</Button>
<Button
onClick={handleSave}
disabled={saving || !selectedProvider || !selectedModel}
className="cyber-btn-primary h-10"
>
{saving ? (
<Loader2 className="w-4 h-4 mr-2 animate-spin" />
) : (
<Check className="w-4 h-4 mr-2" />
)}
</Button>
<Button
onClick={loadData}
variant="ghost"
className="cyber-btn-ghost ml-auto h-10"
size="icon"
className="h-10 w-10 text-muted-foreground hover:text-foreground ml-auto"
title="刷新配置"
>
<RefreshCw className="w-4 h-4" />
<RefreshCw className={`w-4 h-4 ${loading ? 'animate-spin' : ''}`} />
</Button>
</div>
</div>

View File

@ -339,7 +339,7 @@ export function SystemConfig() {
</span>
) : (
<span className="text-amber-400 flex items-center gap-2">
<AlertCircle className="h-4 w-4" /> LLM API Key
<AlertCircle className="h-4 w-4" /> .env LLM ()
</span>
)}
</span>
@ -378,8 +378,11 @@ export function SystemConfig() {
<div className="cyber-card p-6 space-y-6">
{/* Provider Selection */}
<div className="space-y-2">
<Label className="text-xs font-bold text-muted-foreground uppercase"> LLM </Label>
<Select value={config.llmProvider} onValueChange={(v) => updateConfig('llmProvider', v)}>
<Label className="text-xs font-bold text-muted-foreground uppercase flex items-center justify-between">
<span> LLM </span>
<span className="text-[10px] text-amber-500/80 normal-case font-normal border border-amber-500/30 px-1 rounded">.env ()</span>
</Label>
<Select value={config.llmProvider} onValueChange={(v) => updateConfig('llmProvider', v)} disabled>
<SelectTrigger className="h-12 cyber-input">
<SelectValue />
</SelectTrigger>
@ -419,6 +422,7 @@ export function SystemConfig() {
onChange={(e) => updateConfig('llmApiKey', e.target.value)}
placeholder={config.llmProvider === 'baidu' ? 'API_KEY:SECRET_KEY 格式' : '输入你的 API Key'}
className="h-12 cyber-input"
disabled
/>
<Button
variant="outline"
@ -441,6 +445,7 @@ export function SystemConfig() {
onChange={(e) => updateConfig('llmModel', e.target.value)}
placeholder={`默认: ${DEFAULT_MODELS[config.llmProvider] || 'auto'}`}
className="h-10 cyber-input"
disabled
/>
</div>
<div className="space-y-2">
@ -450,6 +455,7 @@ export function SystemConfig() {
onChange={(e) => updateConfig('llmBaseUrl', e.target.value)}
placeholder="留空使用官方地址,或填入中转站地址"
className="h-10 cyber-input"
disabled
/>
</div>
</div>
@ -462,7 +468,7 @@ export function SystemConfig() {
</div>
<Button
onClick={testLLMConnection}
disabled={testingLLM || (!isConfigured && config.llmProvider !== 'ollama')}
disabled={testingLLM}
className="cyber-btn-primary h-10"
>
{testingLLM ? (
@ -503,71 +509,28 @@ export function SystemConfig() {
{showDebugInfo && llmTestResult.debug && (
<div className="mt-3 pt-3 border-t border-border/50">
<div className="text-xs font-mono space-y-1 text-muted-foreground">
<div className="font-bold text-foreground mb-2">:</div>
<div>Provider: <span className="text-foreground">{String(llmTestResult.debug.provider)}</span></div>
<div>Model: <span className="text-foreground">{String(llmTestResult.debug.model_used || llmTestResult.debug.model_requested || 'N/A')}</span></div>
<div>Base URL: <span className="text-foreground">{String(llmTestResult.debug.base_url_used || llmTestResult.debug.base_url_requested || '(default)')}</span></div>
<div>Adapter: <span className="text-foreground">{String(llmTestResult.debug.adapter_type || 'N/A')}</span></div>
<div>API Key: <span className="text-foreground">{String(llmTestResult.debug.api_key_prefix)} (: {String(llmTestResult.debug.api_key_length)})</span></div>
<div>: <span className="text-foreground">{String(llmTestResult.debug.elapsed_time_ms || 'N/A')} ms</span></div>
{/* 用户保存的配置参数 */}
{llmTestResult.debug.saved_config && (
<div className="mt-3 pt-2 border-t border-border/30">
<div className="font-bold text-cyan-400 mb-2">:</div>
<div className="grid grid-cols-2 gap-x-4 gap-y-1">
<div>: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).temperature ?? 'N/A')}</span></div>
<div>Tokens: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).max_tokens ?? 'N/A')}</span></div>
<div>: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).timeout_ms ?? 'N/A')} ms</span></div>
<div>: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).gap_ms ?? 'N/A')} ms</span></div>
<div>: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).concurrency ?? 'N/A')}</span></div>
<div>: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).max_analyze_files ?? 'N/A')}</span></div>
<div>: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).output_language ?? 'N/A')}</span></div>
</div>
</div>
<div className="font-bold text-foreground mb-2">:</div>
{!!llmTestResult.debug.provider && (
<div>: <span className="text-foreground">{String(llmTestResult.debug.provider)}</span></div>
)}
{!!llmTestResult.debug.model && (
<div>: <span className="text-foreground">{String(llmTestResult.debug.model)}</span></div>
)}
{!!llmTestResult.debug.duration_s && (
<div>: <span className="text-foreground">{String(llmTestResult.debug.duration_s)}s</span></div>
)}
{llmTestResult.debug.issues_found !== undefined && (
<div>: <span className="text-emerald-400"> {String(llmTestResult.debug.issues_found)} ()</span></div>
)}
{/* 测试时实际使用的参数 */}
{llmTestResult.debug.test_params && (
<div className="mt-2 pt-2 border-t border-border/30">
<div className="font-bold text-emerald-400 mb-2">使:</div>
<div className="grid grid-cols-3 gap-x-4">
<div>: <span className="text-foreground">{String((llmTestResult.debug.test_params as Record<string, unknown>).temperature ?? 'N/A')}</span></div>
<div>: <span className="text-foreground">{String((llmTestResult.debug.test_params as Record<string, unknown>).timeout ?? 'N/A')}s</span></div>
<div>MaxTokens: <span className="text-foreground">{String((llmTestResult.debug.test_params as Record<string, unknown>).max_tokens ?? 'N/A')}</span></div>
</div>
</div>
{!!llmTestResult.debug.error && (
<div className="text-rose-400 mt-2 font-bold">: {String(llmTestResult.debug.error)}</div>
)}
{llmTestResult.debug.error_category && (
<div className="mt-2">: <span className="text-rose-400">{String(llmTestResult.debug.error_category)}</span></div>
)}
{llmTestResult.debug.error_type && (
<div>: <span className="text-rose-400">{String(llmTestResult.debug.error_type)}</span></div>
)}
{llmTestResult.debug.status_code && (
<div>HTTP : <span className="text-rose-400">{String(llmTestResult.debug.status_code)}</span></div>
)}
{llmTestResult.debug.api_response && (
<div className="mt-2">
<div className="font-bold text-amber-400">API :</div>
<pre className="mt-1 p-2 bg-amber-500/10 border border-amber-500/30 rounded text-xs overflow-x-auto">
{String(llmTestResult.debug.api_response)}
</pre>
</div>
)}
{llmTestResult.debug.error_message && (
<div className="mt-2">
<div className="font-bold text-foreground">:</div>
<pre className="mt-1 p-2 bg-background/50 rounded text-xs overflow-x-auto max-h-32 overflow-y-auto">
{String(llmTestResult.debug.error_message)}
</pre>
</div>
)}
{llmTestResult.debug.traceback && (
<details className="mt-2">
<summary className="cursor-pointer text-muted-foreground hover:text-foreground"></summary>
<pre className="mt-1 p-2 bg-background/50 rounded text-xs overflow-x-auto max-h-48 overflow-y-auto whitespace-pre-wrap">
{!!llmTestResult.debug.traceback && (
<details className="mt-2 text-[10px]">
<summary className="cursor-pointer text-muted-foreground hover:text-foreground"> (Traceback)</summary>
<pre className="mt-1 p-2 bg-background/50 rounded overflow-x-auto max-h-48 overflow-y-auto whitespace-pre-wrap border border-border/20">
{String(llmTestResult.debug.traceback)}
</pre>
</details>
@ -589,6 +552,7 @@ export function SystemConfig() {
value={config.llmTimeout}
onChange={(e) => updateConfig('llmTimeout', Number(e.target.value))}
className="h-10 cyber-input"
disabled
/>
</div>
<div className="space-y-2">
@ -601,6 +565,7 @@ export function SystemConfig() {
value={config.llmTemperature}
onChange={(e) => updateConfig('llmTemperature', Number(e.target.value))}
className="h-10 cyber-input"
disabled
/>
</div>
<div className="space-y-2">
@ -610,6 +575,7 @@ export function SystemConfig() {
value={config.llmMaxTokens}
onChange={(e) => updateConfig('llmMaxTokens', Number(e.target.value))}
className="h-10 cyber-input"
disabled
/>
</div>
</div>
@ -902,13 +868,15 @@ export function SystemConfig() {
</Tabs>
{/* Floating Save Button */}
{hasChanges && (
{
hasChanges && (
<div className="fixed bottom-6 right-6 cyber-card p-4 z-50">
<Button onClick={saveConfig} className="cyber-btn-primary h-12">
<Save className="w-4 h-4 mr-2" />
</Button>
</div>
)}
)
}
{/* Delete SSH Key Confirmation Dialog */}
<AlertDialog open={showDeleteKeyDialog} onOpenChange={setShowDeleteKeyDialog}>
@ -943,6 +911,6 @@ export function SystemConfig() {
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialog>
</div>
</div >
);
}

View File

@ -5,6 +5,7 @@
import { useState, useEffect } from "react";
import { useNavigate } from "react-router-dom";
import { useAuth } from "@/shared/context/AuthContext";
import { Button } from "@/components/ui/button";
import { Input } from "@/components/ui/input";
import { Label } from "@/components/ui/label";
@ -29,6 +30,7 @@ import type { Profile } from "@/shared/types";
export default function Account() {
const navigate = useNavigate();
const { logout } = useAuth();
const [profile, setProfile] = useState<Profile | null>(null);
const [loading, setLoading] = useState(true);
const [saving, setSaving] = useState(false);
@ -128,13 +130,13 @@ export default function Account() {
};
const handleLogout = () => {
localStorage.removeItem('access_token');
logout();
toast.success("已退出登录");
navigate('/login');
};
const handleSwitchAccount = () => {
localStorage.removeItem('access_token');
logout();
navigate('/login');
};