feat(llm): enhance LLM error handling and debug info display
- Add an api_response field to the LLMError exception class to store the raw error message
- Implement an _extract_api_response method that pulls the API server's response out of exceptions
- Add a debug info panel to the frontend that displays detailed error diagnostics
- Return full debug information from the backend test endpoint, including elapsed time and error type
commit 2e11f3e1a3
parent 89ebd4d797
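Taken together, the changes thread the provider's raw error text from the adapter layer up to the settings UI: the adapter attaches it to LLMError as api_response, the /test-llm endpoint copies it into a debug dict, and the frontend renders that dict. A minimal, self-contained sketch of that flow (the local LLMError stub mirrors the constructor in the types hunk below; the error text and status code are illustrative):

```python
from typing import Any, Optional


class LLMError(Exception):
    """Stub with the same constructor shape as the LLMError change below."""

    def __init__(self, message: str, provider=None, status_code: Optional[int] = None,
                 original_error: Optional[Any] = None, api_response: Optional[str] = None):
        super().__init__(message)
        self.provider = provider
        self.status_code = status_code
        self.original_error = original_error
        self.api_response = api_response  # raw error text returned by the API server


def call_provider():
    # Hypothetical adapter failure: the raw server response is preserved on the exception.
    raise LLMError("API Key 无效或已过期", status_code=401,
                   api_response="[invalid_api_key] Incorrect API key provided.")


debug_info: dict = {}
try:
    call_provider()
except Exception as e:
    # Mirrors what the /test-llm endpoint does before returning LLMTestResponse(debug=debug_info).
    if hasattr(e, "api_response") and e.api_response:
        debug_info["api_response"] = e.api_response
    if hasattr(e, "status_code") and e.status_code:
        debug_info["status_code"] = e.status_code

print(debug_info)  # {'api_response': '[invalid_api_key] Incorrect API key provided.', 'status_code': 401}
```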
@@ -292,6 +292,8 @@ class LLMTestResponse(BaseModel):
     message: str
     model: Optional[str] = None
     response: Optional[str] = None
+    # 调试信息
+    debug: Optional[dict] = None


 @router.post("/test-llm", response_model=LLMTestResponse)
@@ -302,7 +304,18 @@ async def test_llm_connection(
     """测试LLM连接是否正常"""
     from app.services.llm.factory import LLMFactory, NATIVE_ONLY_PROVIDERS
     from app.services.llm.adapters import LiteLLMAdapter, BaiduAdapter, MinimaxAdapter, DoubaoAdapter
-    from app.services.llm.types import LLMConfig, LLMProvider, LLMRequest, LLMMessage, DEFAULT_MODELS
+    from app.services.llm.types import LLMConfig, LLMProvider, LLMRequest, LLMMessage, DEFAULT_MODELS, DEFAULT_BASE_URLS
+    import traceback
+    import time
+
+    start_time = time.time()
+    debug_info = {
+        "provider": request.provider,
+        "model_requested": request.model,
+        "base_url_requested": request.baseUrl,
+        "api_key_length": len(request.apiKey) if request.apiKey else 0,
+        "api_key_prefix": request.apiKey[:8] + "..." if request.apiKey and len(request.apiKey) > 8 else "(empty)",
+    }

     try:
         # 解析provider
@@ -322,13 +335,22 @@ async def test_llm_connection(

         provider = provider_map.get(request.provider.lower())
         if not provider:
+            debug_info["error_type"] = "unsupported_provider"
             return LLMTestResponse(
                 success=False,
-                message=f"不支持的LLM提供商: {request.provider}"
+                message=f"不支持的LLM提供商: {request.provider}",
+                debug=debug_info
             )

         # 获取默认模型
         model = request.model or DEFAULT_MODELS.get(provider)
+        base_url = request.baseUrl or DEFAULT_BASE_URLS.get(provider, "")
+
+        debug_info["model_used"] = model
+        debug_info["base_url_used"] = base_url
+        debug_info["is_native_adapter"] = provider in NATIVE_ONLY_PROVIDERS
+
+        print(f"[LLM Test] 开始测试: provider={provider.value}, model={model}, base_url={base_url}")
+
         # 创建配置
         config = LLMConfig(
@@ -348,8 +370,12 @@ async def test_llm_connection(
                 LLMProvider.DOUBAO: DoubaoAdapter,
             }
             adapter = native_adapter_map[provider](config)
+            debug_info["adapter_type"] = type(adapter).__name__
         else:
             adapter = LiteLLMAdapter(config)
+            debug_info["adapter_type"] = "LiteLLMAdapter"
+            # 获取 LiteLLM 实际使用的模型名
+            debug_info["litellm_model"] = getattr(adapter, '_get_model_name', lambda: model)() if hasattr(adapter, '_get_model_name') else model

         test_request = LLMRequest(
             messages=[
@@ -358,49 +384,91 @@ async def test_llm_connection(
             max_tokens=50,
         )

+        print(f"[LLM Test] 发送测试请求...")
         response = await adapter.complete(test_request)

+        elapsed_time = time.time() - start_time
+        debug_info["elapsed_time_ms"] = round(elapsed_time * 1000, 2)
+
         # 验证响应内容
         if not response or not response.content:
+            debug_info["error_type"] = "empty_response"
+            debug_info["raw_response"] = str(response) if response else None
+            print(f"[LLM Test] 空响应: {response}")
             return LLMTestResponse(
                 success=False,
-                message="LLM 返回空响应,请检查 API Key 和配置"
+                message="LLM 返回空响应,请检查 API Key 和配置",
+                debug=debug_info
             )

+        debug_info["response_length"] = len(response.content)
+        debug_info["usage"] = {
+            "prompt_tokens": getattr(response, 'prompt_tokens', None),
+            "completion_tokens": getattr(response, 'completion_tokens', None),
+            "total_tokens": getattr(response, 'total_tokens', None),
+        }
+
+        print(f"[LLM Test] 成功! 响应: {response.content[:50]}... 耗时: {elapsed_time:.2f}s")
+
         return LLMTestResponse(
             success=True,
-            message="LLM连接测试成功",
+            message=f"连接成功 ({elapsed_time:.2f}s)",
             model=model,
-            response=response.content[:100] if response.content else None
+            response=response.content[:100] if response.content else None,
+            debug=debug_info
         )

     except Exception as e:
+        elapsed_time = time.time() - start_time
         error_msg = str(e)
+        error_type = type(e).__name__
+
+        debug_info["elapsed_time_ms"] = round(elapsed_time * 1000, 2)
+        debug_info["error_type"] = error_type
+        debug_info["error_message"] = error_msg
+        debug_info["traceback"] = traceback.format_exc()
+
+        # 提取 LLMError 中的 api_response
+        if hasattr(e, 'api_response') and e.api_response:
+            debug_info["api_response"] = e.api_response
+        if hasattr(e, 'status_code') and e.status_code:
+            debug_info["status_code"] = e.status_code
+
+        print(f"[LLM Test] 失败: {error_type}: {error_msg}")
+        print(f"[LLM Test] Traceback:\n{traceback.format_exc()}")
+
         # 提供更友好的错误信息
-        if "401" in error_msg or "invalid_api_key" in error_msg.lower() or "incorrect api key" in error_msg.lower():
-            return LLMTestResponse(
-                success=False,
-                message="API Key 无效或已过期,请检查后重试"
-            )
+        friendly_message = error_msg
+
+        # 优先检查余额不足(因为某些 API 用 429 表示余额不足)
+        if any(keyword in error_msg for keyword in ["余额不足", "资源包", "充值", "quota", "insufficient", "balance", "402"]):
+            friendly_message = "账户余额不足或配额已用尽,请充值后重试"
+            debug_info["error_category"] = "insufficient_balance"
+        elif "401" in error_msg or "invalid_api_key" in error_msg.lower() or "incorrect api key" in error_msg.lower():
+            friendly_message = "API Key 无效或已过期,请检查后重试"
+            debug_info["error_category"] = "auth_invalid_key"
         elif "authentication" in error_msg.lower():
-            return LLMTestResponse(
-                success=False,
-                message="认证失败,请检查 API Key 是否正确"
-            )
+            friendly_message = "认证失败,请检查 API Key 是否正确"
+            debug_info["error_category"] = "auth_failed"
         elif "timeout" in error_msg.lower():
-            return LLMTestResponse(
-                success=False,
-                message="连接超时,请检查网络或 API 地址是否正确"
-            )
-        elif "connection" in error_msg.lower():
-            return LLMTestResponse(
-                success=False,
-                message="无法连接到 API 服务,请检查网络或 API 地址"
-            )
+            friendly_message = "连接超时,请检查网络或 API 地址是否正确"
+            debug_info["error_category"] = "timeout"
+        elif "connection" in error_msg.lower() or "connect" in error_msg.lower():
+            friendly_message = "无法连接到 API 服务,请检查网络或 API 地址"
+            debug_info["error_category"] = "connection"
+        elif "rate" in error_msg.lower() and "limit" in error_msg.lower():
+            friendly_message = "API 请求频率超限,请稍后重试"
+            debug_info["error_category"] = "rate_limit"
+        elif "model" in error_msg.lower() and ("not found" in error_msg.lower() or "does not exist" in error_msg.lower()):
+            friendly_message = f"模型 '{debug_info.get('model_used', 'unknown')}' 不存在或无权访问"
+            debug_info["error_category"] = "model_not_found"
+        else:
+            debug_info["error_category"] = "unknown"
+
         return LLMTestResponse(
             success=False,
-            message=f"LLM连接测试失败: {error_msg}"
+            message=friendly_message,
+            debug=debug_info
         )
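For orientation, the JSON the endpoint now returns on a failed test (and which the frontend panel further below renders) looks roughly like the following; every value here is made up, and the model shown is only an assumed default:

```python
# Hypothetical payload from POST .../test-llm on a failed run (all values illustrative).
example_failure = {
    "success": False,
    "message": "API Key 无效或已过期,请检查后重试",
    "model": None,
    "response": None,
    "debug": {
        "provider": "openai",
        "model_requested": None,
        "model_used": "gpt-4o-mini",                 # assumed value of DEFAULT_MODELS for the provider
        "base_url_used": "https://api.openai.com/v1",
        "adapter_type": "LiteLLMAdapter",
        "api_key_prefix": "sk-abc1...",
        "api_key_length": 51,
        "elapsed_time_ms": 842.13,
        "error_type": "AuthenticationError",
        "error_category": "auth_invalid_key",
        "status_code": 401,
        "api_response": "[invalid_api_key] Incorrect API key provided.",
    },
}
```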
@@ -109,6 +109,45 @@ class LiteLLMAdapter(BaseLLMAdapter):

         return f"{prefix}/{model}"

+    def _extract_api_response(self, error: Exception) -> Optional[str]:
+        """从异常中提取 API 服务器返回的原始响应信息"""
+        error_str = str(error)
+
+        # 尝试提取 JSON 格式的错误信息
+        import re
+        import json
+
+        # 匹配 {'error': {...}} 或 {"error": {...}} 格式
+        json_pattern = r"\{['\"]error['\"]:\s*\{[^}]+\}\}"
+        match = re.search(json_pattern, error_str)
+        if match:
+            try:
+                # 将单引号替换为双引号以便 JSON 解析
+                json_str = match.group().replace("'", '"')
+                error_obj = json.loads(json_str)
+                if 'error' in error_obj:
+                    err = error_obj['error']
+                    code = err.get('code', '')
+                    message = err.get('message', '')
+                    return f"[{code}] {message}" if code else message
+            except:
+                pass
+
+        # 尝试提取 message 字段
+        message_pattern = r"['\"]message['\"]:\s*['\"]([^'\"]+)['\"]"
+        match = re.search(message_pattern, error_str)
+        if match:
+            return match.group(1)
+
+        # 尝试从 litellm 异常中获取原始消息
+        if hasattr(error, 'message'):
+            return error.message
+        if hasattr(error, 'llm_provider'):
+            # litellm 异常通常包含原始错误信息
+            return error_str.split(' - ')[-1] if ' - ' in error_str else None
+
+        return None
+
     def _get_api_base(self) -> Optional[str]:
         """获取 API 基础 URL"""
         # 优先使用用户配置的 base_url
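As a standalone illustration of _extract_api_response's first branch, the snippet below applies the same regex and quote-swap to a made-up LiteLLM-style exception string (the string itself is fabricated for the example, but follows the common OpenAI error shape):

```python
import json
import re

# Made-up error text in the shape LiteLLM often embeds in its exception messages.
error_str = ("litellm.AuthenticationError: OpenAIException - "
             "{'error': {'code': 'invalid_api_key', 'message': 'Incorrect API key provided.'}}")

# Same pattern as _extract_api_response: match {'error': {...}} or {"error": {...}}.
match = re.search(r"\{['\"]error['\"]:\s*\{[^}]+\}\}", error_str)
if match:
    # Single quotes are swapped for double quotes so the fragment parses as JSON.
    err = json.loads(match.group().replace("'", '"'))["error"]
    print(f"[{err.get('code', '')}] {err.get('message', '')}")
    # -> [invalid_api_key] Incorrect API key provided.
```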
@@ -200,20 +239,31 @@ class LiteLLMAdapter(BaseLLMAdapter):
             # 调用 LiteLLM
             response = await litellm.acompletion(**kwargs)
         except litellm.exceptions.AuthenticationError as e:
-            raise LLMError(f"API Key 无效或已过期: {str(e)}", self.config.provider, 401)
+            api_response = self._extract_api_response(e)
+            raise LLMError(f"API Key 无效或已过期", self.config.provider, 401, api_response=api_response)
         except litellm.exceptions.RateLimitError as e:
-            raise LLMError(f"API 调用频率超限: {str(e)}", self.config.provider, 429)
+            error_msg = str(e)
+            api_response = self._extract_api_response(e)
+            # 区分"余额不足"和"频率超限"
+            if any(keyword in error_msg for keyword in ["余额不足", "资源包", "充值", "quota", "insufficient", "balance"]):
+                raise LLMError(f"账户余额不足或配额已用尽,请充值后重试", self.config.provider, 402, api_response=api_response)
+            raise LLMError(f"API 调用频率超限,请稍后重试", self.config.provider, 429, api_response=api_response)
         except litellm.exceptions.APIConnectionError as e:
-            raise LLMError(f"无法连接到 API 服务: {str(e)}", self.config.provider)
+            api_response = self._extract_api_response(e)
+            raise LLMError(f"无法连接到 API 服务", self.config.provider, api_response=api_response)
         except litellm.exceptions.APIError as e:
-            raise LLMError(f"API 错误: {str(e)}", self.config.provider, getattr(e, 'status_code', None))
+            api_response = self._extract_api_response(e)
+            raise LLMError(f"API 错误", self.config.provider, getattr(e, 'status_code', None), api_response=api_response)
         except Exception as e:
             # 捕获其他异常并重新抛出
             error_msg = str(e)
+            api_response = self._extract_api_response(e)
             if "invalid_api_key" in error_msg.lower() or "incorrect api key" in error_msg.lower():
-                raise LLMError(f"API Key 无效: {error_msg}", self.config.provider, 401)
+                raise LLMError(f"API Key 无效", self.config.provider, 401, api_response=api_response)
             elif "authentication" in error_msg.lower():
-                raise LLMError(f"认证失败: {error_msg}", self.config.provider, 401)
+                raise LLMError(f"认证失败", self.config.provider, 401, api_response=api_response)
+            elif any(keyword in error_msg for keyword in ["余额不足", "资源包", "充值", "quota", "insufficient", "balance"]):
+                raise LLMError(f"账户余额不足或配额已用尽", self.config.provider, 402, api_response=api_response)
             raise

         # 解析响应
@@ -79,12 +79,14 @@ class LLMError(Exception):
         message: str,
         provider: Optional[LLMProvider] = None,
         status_code: Optional[int] = None,
-        original_error: Optional[Any] = None
+        original_error: Optional[Any] = None,
+        api_response: Optional[str] = None
     ):
         super().__init__(message)
         self.provider = provider
         self.status_code = status_code
         self.original_error = original_error
+        self.api_response = api_response  # API 服务器返回的原始错误信息


 # 各平台默认模型 (2025年最新推荐)
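A short usage sketch for the widened constructor; it assumes LLMError and LLMProvider are importable from app.services.llm.types (as the endpoint's imports above suggest), that the enum has an OPENAI member, and that the error payload is invented:

```python
# Usage sketch for the extended constructor (values are illustrative).
from app.services.llm.types import LLMError, LLMProvider

try:
    raise LLMError(
        "API 调用频率超限,请稍后重试",
        provider=LLMProvider.OPENAI,   # assumes an OPENAI member exists on the enum
        status_code=429,
        api_response='{"error": {"code": "rate_limit_exceeded", "message": "Too many requests"}}',
    )
except LLMError as e:
    # Downstream code (the /test-llm endpoint) copies these attributes into debug_info.
    print(e, e.status_code, e.api_response)
```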
@@ -51,7 +51,8 @@ export function SystemConfig() {
   const [showApiKey, setShowApiKey] = useState(false);
   const [hasChanges, setHasChanges] = useState(false);
   const [testingLLM, setTestingLLM] = useState(false);
-  const [llmTestResult, setLlmTestResult] = useState<{ success: boolean; message: string } | null>(null);
+  const [llmTestResult, setLlmTestResult] = useState<{ success: boolean; message: string; debug?: Record<string, unknown> } | null>(null);
+  const [showDebugInfo, setShowDebugInfo] = useState(false);

   useEffect(() => { loadConfig(); }, []);

@@ -372,6 +373,7 @@ export function SystemConfig() {
       </div>
       {llmTestResult && (
        <div className={`p-3 rounded-lg ${llmTestResult.success ? 'bg-emerald-500/10 border border-emerald-500/30' : 'bg-rose-500/10 border border-rose-500/30'}`}>
+          <div className="flex items-center justify-between">
           <div className="flex items-center gap-2 text-sm">
           {llmTestResult.success ? (
             <CheckCircle2 className="h-4 w-4 text-emerald-400" />
@@ -382,6 +384,61 @@ export function SystemConfig() {
               {llmTestResult.message}
             </span>
           </div>
+          {llmTestResult.debug && (
+            <button
+              onClick={() => setShowDebugInfo(!showDebugInfo)}
+              className="text-xs text-muted-foreground hover:text-foreground underline"
+            >
+              {showDebugInfo ? '隐藏调试信息' : '显示调试信息'}
+            </button>
+          )}
+          </div>
+          {showDebugInfo && llmTestResult.debug && (
+            <div className="mt-3 pt-3 border-t border-border/50">
+              <div className="text-xs font-mono space-y-1 text-muted-foreground">
+                <div className="font-bold text-foreground mb-2">调试信息:</div>
+                <div>Provider: <span className="text-foreground">{String(llmTestResult.debug.provider)}</span></div>
+                <div>Model: <span className="text-foreground">{String(llmTestResult.debug.model_used || llmTestResult.debug.model_requested || 'N/A')}</span></div>
+                <div>Base URL: <span className="text-foreground">{String(llmTestResult.debug.base_url_used || llmTestResult.debug.base_url_requested || '(default)')}</span></div>
+                <div>Adapter: <span className="text-foreground">{String(llmTestResult.debug.adapter_type || 'N/A')}</span></div>
+                <div>API Key: <span className="text-foreground">{String(llmTestResult.debug.api_key_prefix)} (长度: {String(llmTestResult.debug.api_key_length)})</span></div>
+                <div>耗时: <span className="text-foreground">{String(llmTestResult.debug.elapsed_time_ms || 'N/A')} ms</span></div>
+                {llmTestResult.debug.error_category && (
+                  <div>错误类型: <span className="text-rose-400">{String(llmTestResult.debug.error_category)}</span></div>
+                )}
+                {llmTestResult.debug.error_type && (
+                  <div>异常类型: <span className="text-rose-400">{String(llmTestResult.debug.error_type)}</span></div>
+                )}
+                {llmTestResult.debug.status_code && (
+                  <div>HTTP 状态码: <span className="text-rose-400">{String(llmTestResult.debug.status_code)}</span></div>
+                )}
+                {llmTestResult.debug.api_response && (
+                  <div className="mt-2">
+                    <div className="font-bold text-amber-400">API 服务器返回:</div>
+                    <pre className="mt-1 p-2 bg-amber-500/10 border border-amber-500/30 rounded text-xs overflow-x-auto">
+                      {String(llmTestResult.debug.api_response)}
+                    </pre>
+                  </div>
+                )}
+                {llmTestResult.debug.error_message && (
+                  <div className="mt-2">
+                    <div className="font-bold text-foreground">完整错误信息:</div>
+                    <pre className="mt-1 p-2 bg-background/50 rounded text-xs overflow-x-auto max-h-32 overflow-y-auto">
+                      {String(llmTestResult.debug.error_message)}
+                    </pre>
+                  </div>
+                )}
+                {llmTestResult.debug.traceback && (
+                  <details className="mt-2">
+                    <summary className="cursor-pointer text-muted-foreground hover:text-foreground">完整堆栈跟踪</summary>
+                    <pre className="mt-1 p-2 bg-background/50 rounded text-xs overflow-x-auto max-h-48 overflow-y-auto whitespace-pre-wrap">
+                      {String(llmTestResult.debug.traceback)}
+                    </pre>
+                  </details>
+                )}
+              </div>
+            </div>
+          )}
         </div>
       )}