feat(agent): use user-configured LLM parameters instead of hardcoded values

Refactor all agents and the LLM service to remove hardcoded temperature and max_tokens parameters
Add a get_analysis_config function to centralize handling of analysis configuration
Show the user's saved configuration parameters in the LLM test endpoint
Show detailed LLM test information by default in the frontend debug panel
lintsinghua 2025-12-19 16:08:26 +08:00
parent 9fe15f0d0b
commit 8fe96a83cf
11 changed files with 257 additions and 89 deletions
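Editor's note: the precedence rule this commit introduces boils down to "saved user config first, system settings second." The sketch below restates it outside the diff as a minimal, self-contained example; the otherConfig key names come from the diff, while the _Settings stand-in and its values are placeholders, not the project's real defaults.

# Minimal sketch of the fallback applied by get_analysis_config (placeholder defaults).
from typing import Any, Dict, Optional

class _Settings:  # stand-in for app.core.config.settings; values are illustrative only
    MAX_ANALYZE_FILES = 100
    LLM_CONCURRENCY = 2
    LLM_GAP_MS = 500

settings = _Settings()

def get_analysis_config(user_config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Prefer the user's saved otherConfig; fall back to system settings."""
    other = (user_config or {}).get("otherConfig", {})
    return {
        "max_analyze_files": other.get("maxAnalyzeFiles") or settings.MAX_ANALYZE_FILES,
        "llm_concurrency": other.get("llmConcurrency") or settings.LLM_CONCURRENCY,
        "llm_gap_ms": other.get("llmGapMs") or settings.LLM_GAP_MS,
    }

# get_analysis_config({"otherConfig": {"maxAnalyzeFiles": 20}})
# -> {"max_analyze_files": 20, "llm_concurrency": 2, "llm_gap_ms": 500}

Note that the `or` fallback treats a saved 0 and a missing key the same way, which matches the helper added in this diff.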

View File

@@ -299,6 +299,7 @@ class LLMTestResponse(BaseModel):
@router.post("/test-llm", response_model=LLMTestResponse)
async def test_llm_connection(
request: LLMTestRequest,
db: AsyncSession = Depends(get_db),
current_user: User = Depends(deps.get_current_user),
) -> Any:
"""Test whether the LLM connection works."""
@@ -309,12 +310,53 @@ async def test_llm_connection(
import time
start_time = time.time()
# Fetch the user's saved configuration
result = await db.execute(
select(UserConfig).where(UserConfig.user_id == current_user.id)
)
user_config_record = result.scalar_one_or_none()
# Parse the user configuration
saved_llm_config = {}
saved_other_config = {}
if user_config_record:
if user_config_record.llm_config:
saved_llm_config = decrypt_config(
json.loads(user_config_record.llm_config),
SENSITIVE_LLM_FIELDS
)
if user_config_record.other_config:
saved_other_config = decrypt_config(
json.loads(user_config_record.other_config),
SENSITIVE_OTHER_FIELDS
)
# Pull parameters from the saved config (for debug display)
saved_timeout_ms = saved_llm_config.get('llmTimeout', settings.LLM_TIMEOUT * 1000)
saved_temperature = saved_llm_config.get('llmTemperature', settings.LLM_TEMPERATURE)
saved_max_tokens = saved_llm_config.get('llmMaxTokens', settings.LLM_MAX_TOKENS)
saved_concurrency = saved_other_config.get('llmConcurrency', settings.LLM_CONCURRENCY)
saved_gap_ms = saved_other_config.get('llmGapMs', settings.LLM_GAP_MS)
saved_max_files = saved_other_config.get('maxAnalyzeFiles', settings.MAX_ANALYZE_FILES)
saved_output_lang = saved_other_config.get('outputLanguage', settings.OUTPUT_LANGUAGE)
debug_info = {
"provider": request.provider,
"model_requested": request.model,
"base_url_requested": request.baseUrl,
"api_key_length": len(request.apiKey) if request.apiKey else 0,
"api_key_prefix": request.apiKey[:8] + "..." if request.apiKey and len(request.apiKey) > 8 else "(empty)",
# User-saved configuration parameters
"saved_config": {
"timeout_ms": saved_timeout_ms,
"temperature": saved_temperature,
"max_tokens": saved_max_tokens,
"concurrency": saved_concurrency,
"gap_ms": saved_gap_ms,
"max_analyze_files": saved_max_files,
"output_language": saved_output_lang,
},
}
try:
@@ -346,11 +388,21 @@ async def test_llm_connection(
model = request.model or DEFAULT_MODELS.get(provider)
base_url = request.baseUrl or DEFAULT_BASE_URLS.get(provider, "")
# Use all of the user's saved configuration parameters for the test
test_timeout = int(saved_timeout_ms / 1000) if saved_timeout_ms else settings.LLM_TIMEOUT
test_temperature = saved_temperature if saved_temperature is not None else settings.LLM_TEMPERATURE
test_max_tokens = saved_max_tokens if saved_max_tokens else settings.LLM_MAX_TOKENS
debug_info["model_used"] = model
debug_info["base_url_used"] = base_url
debug_info["is_native_adapter"] = provider in NATIVE_ONLY_PROVIDERS
debug_info["test_params"] = {
"timeout": test_timeout,
"temperature": test_temperature,
"max_tokens": test_max_tokens,
}
print(f"[LLM Test] Starting test: provider={provider.value}, model={model}, base_url={base_url}")
print(f"[LLM Test] Starting test: provider={provider.value}, model={model}, base_url={base_url}, temperature={test_temperature}, timeout={test_timeout}s, max_tokens={test_max_tokens}")
# Create the config
config = LLMConfig(
@@ -358,8 +410,9 @@ async def test_llm_connection(
api_key=request.apiKey,
model=model,
base_url=request.baseUrl,
timeout=30, # use a shorter timeout for the test
max_tokens=50, # use fewer tokens for the test
timeout=test_timeout,
temperature=test_temperature,
max_tokens=test_max_tokens,
)
# Create a new adapter instance directly (no caching) to ensure the latest config is used
@@ -375,13 +428,14 @@ async def test_llm_connection(
adapter = LiteLLMAdapter(config)
debug_info["adapter_type"] = "LiteLLMAdapter"
# Get the model name LiteLLM actually uses
debug_info["litellm_model"] = getattr(adapter, '_get_model_name', lambda: model)() if hasattr(adapter, '_get_model_name') else model
debug_info["litellm_model"] = getattr(adapter, '_get_litellm_model', lambda: model)() if hasattr(adapter, '_get_litellm_model') else model
test_request = LLMRequest(
messages=[
LLMMessage(role="user", content="Say 'Hello' in one word.")
],
max_tokens=50,
temperature=test_temperature,
max_tokens=test_max_tokens,
)
print(f"[LLM Test] Sending test request...")

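As a side note, the test endpoint above resolves its parameters with a slightly different rule than the plain `or` fallback: temperature is compared against None explicitly so that a saved value of 0.0 is honored, and the saved timeout is converted from milliseconds to seconds. A condensed, self-contained sketch follows; the default constants are placeholders standing in for settings.LLM_TIMEOUT, settings.LLM_TEMPERATURE, and settings.LLM_MAX_TOKENS.

# Condensed sketch of the parameter resolution used by the LLM test endpoint above.
from typing import Any, Dict, Optional

DEFAULT_TIMEOUT_S = 120      # placeholder for settings.LLM_TIMEOUT
DEFAULT_TEMPERATURE = 0.1    # placeholder for settings.LLM_TEMPERATURE
DEFAULT_MAX_TOKENS = 4096    # placeholder for settings.LLM_MAX_TOKENS

def resolve_test_params(saved_llm_config: Optional[Dict[str, Any]]) -> Dict[str, Any]:
    cfg = saved_llm_config or {}
    timeout_ms = cfg.get("llmTimeout")       # stored in milliseconds in the saved config
    temperature = cfg.get("llmTemperature")  # may legitimately be 0.0, so compare with None
    max_tokens = cfg.get("llmMaxTokens")
    return {
        "timeout": int(timeout_ms / 1000) if timeout_ms else DEFAULT_TIMEOUT_S,
        "temperature": temperature if temperature is not None else DEFAULT_TEMPERATURE,
        "max_tokens": max_tokens if max_tokens else DEFAULT_MAX_TOKENS,
    }

# resolve_test_params({"llmTimeout": 60000, "llmTemperature": 0.0})
# -> {"timeout": 60, "temperature": 0.0, "max_tokens": 4096}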
View File

@@ -20,7 +20,7 @@ from app.models.project import Project
from app.models.analysis import InstantAnalysis
from app.models.user_config import UserConfig
from app.services.llm.service import LLMService
from app.services.scanner import task_control, is_text_file, should_exclude, get_language_from_path
from app.services.scanner import task_control, is_text_file, should_exclude, get_language_from_path, get_analysis_config
from app.services.zip_storage import load_project_zip, save_project_zip, has_project_zip
from app.core.config import settings
@@ -93,6 +93,11 @@ async def process_zip_task(task_id: str, file_path: str, db_session_factory, use
except:
pass
# Get the analysis config (user config takes precedence)
analysis_config = get_analysis_config(user_config)
max_analyze_files = analysis_config['max_analyze_files']
llm_gap_ms = analysis_config['llm_gap_ms']
# Limit the number of files
# If specific files were requested, analyze only those
target_files = scan_config.get('file_paths', [])
@@ -101,13 +106,13 @@ async def process_zip_task(task_id: str, file_path: str, db_session_factory, use
normalized_targets = {normalize_path(p) for p in target_files}
print(f"🎯 ZIP task: {len(normalized_targets)} specific files requested for analysis")
files_to_scan = [f for f in files_to_scan if f['path'] in normalized_targets]
elif settings.MAX_ANALYZE_FILES > 0:
files_to_scan = files_to_scan[:settings.MAX_ANALYZE_FILES]
elif max_analyze_files > 0:
files_to_scan = files_to_scan[:max_analyze_files]
task.total_files = len(files_to_scan)
await db.commit()
print(f"📊 ZIP task {task_id}: found {len(files_to_scan)} files")
print(f"📊 ZIP task {task_id}: found {len(files_to_scan)} files (max files: {max_analyze_files}, request gap: {llm_gap_ms}ms)")
total_issues = 0
total_lines = 0
@@ -178,12 +183,12 @@ async def process_zip_task(task_id: str, file_path: str, db_session_factory, use
print(f"📈 ZIP task {task_id}: progress {scanned_files}/{len(files_to_scan)}")
# Gap between requests
await asyncio.sleep(settings.LLM_GAP_MS / 1000)
await asyncio.sleep(llm_gap_ms / 1000)
except Exception as file_error:
failed_files += 1
print(f"❌ ZIP task: failed to analyze file ({file_info['path']}): {file_error}")
await asyncio.sleep(settings.LLM_GAP_MS / 1000)
await asyncio.sleep(llm_gap_ms / 1000)
# Finish the task
avg_quality_score = sum(quality_scores) / len(quality_scores) if quality_scores else 100.0

View File

@@ -452,12 +452,11 @@ class AnalysisAgent(BaseAgent):
break
# Call the LLM for reasoning and decision-making (streaming output)
# 🔥 Increase max_tokens to 4096 to avoid truncating long outputs
# 🔥 Use the user-configured temperature and max_tokens
try:
llm_output, tokens_this_round = await self.stream_llm_call(
self._conversation_history,
temperature=0.1,
max_tokens=8192,
# 🔥 Don't pass temperature and max_tokens; use the user config
)
except asyncio.CancelledError:
logger.info(f"[{self.name}] LLM call cancelled")
@@ -653,8 +652,7 @@ Final Answer:""",
try:
summary_output, _ = await self.stream_llm_call(
self._conversation_history,
temperature=0.1,
max_tokens=4096,
# 🔥 Don't pass temperature and max_tokens; use the user config
)
if summary_output and summary_output.strip():

View File

@@ -845,10 +845,9 @@ class BaseAgent(ABC):
self._iteration += 1
try:
# 🔥 Don't pass temperature and max_tokens; let LLMService use the user config
response = await self.llm_service.chat_completion(
messages=messages,
temperature=self.config.temperature,
max_tokens=self.config.max_tokens,
tools=tools,
)
@@ -929,8 +928,8 @@ class BaseAgent(ABC):
async def stream_llm_call(
self,
messages: List[Dict[str, str]],
temperature: float = 0.1,
max_tokens: int = 2048,
temperature: Optional[float] = None,
max_tokens: Optional[int] = None,
auto_compress: bool = True,
) -> Tuple[str, int]:
"""
@@ -940,8 +939,8 @@ class BaseAgent(ABC):
Args:
messages: list of messages
temperature: temperature
max_tokens: max tokens
temperature: temperature; uses the user config when None
max_tokens: max tokens; uses the user config when None
auto_compress: whether to automatically compress an overly long message history
Returns:
@@ -964,7 +963,7 @@ class BaseAgent(ABC):
logger.info(f"[{self.name}] ✅ thinking_start emitted, starting LLM stream...")
try:
# Get the streaming iterator
# Get the streaming iterator (the user config is used when None is passed)
stream = self.llm_service.chat_completion_stream(
messages=messages,
temperature=temperature,

View File

@@ -241,8 +241,7 @@ class OrchestratorAgent(BaseAgent):
try:
llm_output, tokens_this_round = await self.stream_llm_call(
self._conversation_history,
temperature=0.1,
max_tokens=8192, # 🔥 increased to 8192 to avoid truncation
# 🔥 Don't pass temperature and max_tokens; use the user config
)
except asyncio.CancelledError:
logger.info(f"[{self.name}] LLM call cancelled")

View File

@@ -358,8 +358,7 @@ class ReconAgent(BaseAgent):
try:
llm_output, tokens_this_round = await self.stream_llm_call(
self._conversation_history,
temperature=0.1,
max_tokens=8192, # 🔥 increased to 8192 to avoid truncation
# 🔥 Don't pass temperature and max_tokens; use the user config
)
except asyncio.CancelledError:
logger.info(f"[{self.name}] LLM call cancelled")
@@ -525,8 +524,7 @@ Final Answer:""",
try:
summary_output, _ = await self.stream_llm_call(
self._conversation_history,
temperature=0.1,
max_tokens=2048,
# 🔥 Don't pass temperature and max_tokens; use the user config
)
if summary_output and summary_output.strip():

View File

@@ -587,8 +587,7 @@ class VerificationAgent(BaseAgent):
try:
llm_output, tokens_this_round = await self.stream_llm_call(
self._conversation_history,
temperature=0.1,
max_tokens=8192, # 🔥 increased to 8192 to avoid truncation
# 🔥 Don't pass temperature and max_tokens; use the user config
)
except asyncio.CancelledError:
logger.info(f"[{self.name}] LLM call cancelled")

View File

@@ -125,8 +125,7 @@ class LLMRouter:
{"role": "system", "content": "You are the decision-maker for the security audit workflow, responsible for deciding the next action."},
{"role": "user", "content": prompt},
],
temperature=0.1,
max_tokens=200,
# 🔥 Don't pass temperature and max_tokens; use the user config
)
content = response.get("content", "")
@@ -180,8 +179,7 @@ class LLMRouter:
{"role": "system", "content": "You are the decision-maker for the security audit workflow, responsible for deciding the next action."},
{"role": "user", "content": prompt},
],
temperature=0.1,
max_tokens=200,
# 🔥 Don't pass temperature and max_tokens; use the user config
)
content = response.get("content", "")
@@ -227,8 +225,7 @@ class LLMRouter:
{"role": "system", "content": "You are the decision-maker for the security audit workflow, responsible for deciding the next action."},
{"role": "user", "content": prompt},
],
temperature=0.1,
max_tokens=200,
# 🔥 Don't pass temperature and max_tokens; use the user config
)
content = response.get("content", "")

View File

@@ -359,12 +359,14 @@ Please analyze the following code:
try:
adapter = LLMFactory.create_adapter(self.config)
# Use the user-configured temperature (fall back to the default in config if not set)
request = LLMRequest(
messages=[
LLMMessage(role="system", content=system_prompt),
LLMMessage(role="user", content=user_prompt)
],
temperature=0.1,
temperature=self.config.temperature,
max_tokens=self.config.max_tokens,
)
response = await adapter.complete(request)
@@ -402,23 +404,29 @@ Please analyze the following code:
# Re-raise the exception and let the caller handle it
raise
async def chat_completion_raw(
async def chat_completion(
self,
messages: List[Dict[str, str]],
temperature: float = 0.1,
max_tokens: int = 4096,
temperature: Optional[float] = None,
max_tokens: Optional[int] = None,
tools: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
"""
🔥 Raw chat-completion interface used by agents (backward-compatible with the old interface)
🔥 Chat-completion interface used by agents, with tool-call support
Args:
messages: list of messages in the form [{"role": "user", "content": "..."}]
temperature: temperature parameter
max_tokens: maximum number of tokens
temperature: temperature parameter; uses the user config when None
max_tokens: maximum number of tokens; uses the user config when None
tools: list of tool descriptions (optional)
Returns:
dict containing content and usage
dict containing content, usage, and tool_calls
"""
# Use the user config as the default
actual_temperature = temperature if temperature is not None else self.config.temperature
actual_max_tokens = max_tokens if max_tokens is not None else self.config.max_tokens
# Convert the message format
llm_messages = [
LLMMessage(role=msg["role"], content=msg["content"])
@@ -427,8 +435,60 @@ Please analyze the following code:
request = LLMRequest(
messages=llm_messages,
temperature=temperature,
max_tokens=max_tokens,
temperature=actual_temperature,
max_tokens=actual_max_tokens,
tools=tools,
)
adapter = LLMFactory.create_adapter(self.config)
response = await adapter.complete(request)
result = {
"content": response.content,
"usage": {
"prompt_tokens": response.usage.prompt_tokens if response.usage else 0,
"completion_tokens": response.usage.completion_tokens if response.usage else 0,
"total_tokens": response.usage.total_tokens if response.usage else 0,
},
}
# Attach tool-call information
if response.tool_calls:
result["tool_calls"] = response.tool_calls
return result
async def chat_completion_raw(
self,
messages: List[Dict[str, str]],
temperature: Optional[float] = None,
max_tokens: Optional[int] = None,
) -> Dict[str, Any]:
"""
🔥 Raw chat-completion interface used by agents (backward-compatible with the old interface)
Args:
messages: list of messages in the form [{"role": "user", "content": "..."}]
temperature: temperature parameter; uses the user config when None
max_tokens: maximum number of tokens; uses the user config when None
Returns:
dict containing content and usage
"""
# Use the user config as the default
actual_temperature = temperature if temperature is not None else self.config.temperature
actual_max_tokens = max_tokens if max_tokens is not None else self.config.max_tokens
# Convert the message format
llm_messages = [
LLMMessage(role=msg["role"], content=msg["content"])
for msg in messages
]
request = LLMRequest(
messages=llm_messages,
temperature=actual_temperature,
max_tokens=actual_max_tokens,
)
adapter = LLMFactory.create_adapter(self.config)
@@ -446,20 +506,24 @@ Please analyze the following code:
async def chat_completion_stream(
self,
messages: List[Dict[str, str]],
temperature: float = 0.1,
max_tokens: int = 4096,
temperature: Optional[float] = None,
max_tokens: Optional[int] = None,
):
"""
Streaming chat-completion interface that yields token by token
Args:
messages: list of messages
temperature: temperature parameter
max_tokens: maximum number of tokens
temperature: temperature parameter; uses the user config when None
max_tokens: maximum number of tokens; uses the user config when None
Yields:
dict: {"type": "token", "content": str} or {"type": "done", ...}
"""
# Use the user config as the default
actual_temperature = temperature if temperature is not None else self.config.temperature
actual_max_tokens = max_tokens if max_tokens is not None else self.config.max_tokens
llm_messages = [
LLMMessage(role=msg["role"], content=msg["content"])
for msg in messages
@@ -467,8 +531,8 @@ Please analyze the following code:
request = LLMRequest(
messages=llm_messages,
temperature=temperature,
max_tokens=max_tokens,
temperature=actual_temperature,
max_tokens=actual_max_tokens,
)
if self.config.provider in NATIVE_ONLY_PROVIDERS:
@@ -870,12 +934,14 @@ Please analyze the following code:
try:
adapter = LLMFactory.create_adapter(self.config)
# Use the user-configured temperature and max_tokens
request = LLMRequest(
messages=[
LLMMessage(role="system", content=full_system_prompt),
LLMMessage(role="user", content=user_prompt)
],
temperature=0.1,
temperature=self.config.temperature,
max_tokens=self.config.max_tokens,
)
response = await adapter.complete(request)

View File

@@ -15,6 +15,25 @@ from app.services.llm.service import LLMService
from app.core.config import settings
def get_analysis_config(user_config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
"""
Get analysis configuration parameters, preferring the user config and falling back to system settings.
Returns:
A dict with the following fields:
- max_analyze_files: maximum number of files to analyze
- llm_concurrency: LLM concurrency
- llm_gap_ms: gap between LLM requests, in milliseconds
"""
other_config = (user_config or {}).get('otherConfig', {})
return {
'max_analyze_files': other_config.get('maxAnalyzeFiles') or settings.MAX_ANALYZE_FILES,
'llm_concurrency': other_config.get('llmConcurrency') or settings.LLM_CONCURRENCY,
'llm_gap_ms': other_config.get('llmGapMs') or settings.LLM_GAP_MS,
}
# Supported text file extensions
TEXT_EXTENSIONS = [
".js", ".ts", ".tsx", ".jsx", ".py", ".java", ".go", ".rs",
@@ -344,19 +363,24 @@ async def scan_repo_task(task_id: str, db_session_factory, user_config: dict = N
print(f"✅ Successfully fetched the file list for branch {actual_branch}")
# Get the analysis config (user config takes precedence)
analysis_config = get_analysis_config(user_config)
max_analyze_files = analysis_config['max_analyze_files']
llm_gap_ms = analysis_config['llm_gap_ms']
# Limit the number of files
# If specific files were requested, analyze only those
target_files = (user_config or {}).get('scan_config', {}).get('file_paths', [])
if target_files:
print(f"🎯 {len(target_files)} specific files requested for analysis")
files = [f for f in files if f['path'] in target_files]
elif settings.MAX_ANALYZE_FILES > 0:
files = files[:settings.MAX_ANALYZE_FILES]
elif max_analyze_files > 0:
files = files[:max_analyze_files]
task.total_files = len(files)
await db.commit()
print(f"📊 Fetched {len(files)} files, starting analysis")
print(f"📊 Fetched {len(files)} files, starting analysis (max files: {max_analyze_files}, request gap: {llm_gap_ms}ms)")
# 4. Analyze files
total_issues = 0
@@ -484,7 +508,7 @@ async def scan_repo_task(task_id: str, db_session_factory, user_config: dict = N
print(f"📈 Task {task_id}: progress {scanned_files}/{len(files)} ({int(scanned_files/len(files)*100)}%)")
# Gap between requests
await asyncio.sleep(settings.LLM_GAP_MS / 1000)
await asyncio.sleep(llm_gap_ms / 1000)
except Exception as file_error:
failed_files += 1
@@ -494,7 +518,7 @@ async def scan_repo_task(task_id: str, db_session_factory, user_config: dict = N
print(f"❌ Failed to analyze file ({file_info['path']}): {file_error}")
print(f" Error type: {type(file_error).__name__}")
print(f" Details: {traceback.format_exc()}")
await asyncio.sleep(settings.LLM_GAP_MS / 1000)
await asyncio.sleep(llm_gap_ms / 1000)
# 5. Finish the task
avg_quality_score = sum(quality_scores) / len(quality_scores) if quality_scores else 100.0

View File

@@ -52,7 +52,7 @@ export function SystemConfig() {
const [hasChanges, setHasChanges] = useState(false);
const [testingLLM, setTestingLLM] = useState(false);
const [llmTestResult, setLlmTestResult] = useState<{ success: boolean; message: string; debug?: Record<string, unknown> } | null>(null);
const [showDebugInfo, setShowDebugInfo] = useState(false);
const [showDebugInfo, setShowDebugInfo] = useState(true);
useEffect(() => { loadConfig(); }, []);
@@ -396,15 +396,44 @@ export function SystemConfig() {
{showDebugInfo && llmTestResult.debug && (
<div className="mt-3 pt-3 border-t border-border/50">
<div className="text-xs font-mono space-y-1 text-muted-foreground">
<div className="font-bold text-foreground mb-2">Debug Info:</div>
<div className="font-bold text-foreground mb-2">Debug Info:</div>
<div>Provider: <span className="text-foreground">{String(llmTestResult.debug.provider)}</span></div>
<div>Model: <span className="text-foreground">{String(llmTestResult.debug.model_used || llmTestResult.debug.model_requested || 'N/A')}</span></div>
<div>Base URL: <span className="text-foreground">{String(llmTestResult.debug.base_url_used || llmTestResult.debug.base_url_requested || '(default)')}</span></div>
<div>Adapter: <span className="text-foreground">{String(llmTestResult.debug.adapter_type || 'N/A')}</span></div>
<div>API Key: <span className="text-foreground">{String(llmTestResult.debug.api_key_prefix)} (length: {String(llmTestResult.debug.api_key_length)})</span></div>
<div>Elapsed: <span className="text-foreground">{String(llmTestResult.debug.elapsed_time_ms || 'N/A')} ms</span></div>
{/* User-saved config parameters */}
{llmTestResult.debug.saved_config && (
<div className="mt-3 pt-2 border-t border-border/30">
<div className="font-bold text-cyan-400 mb-2">Saved Config:</div>
<div className="grid grid-cols-2 gap-x-4 gap-y-1">
<div>Temperature: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).temperature ?? 'N/A')}</span></div>
<div>Max Tokens: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).max_tokens ?? 'N/A')}</span></div>
<div>Timeout: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).timeout_ms ?? 'N/A')} ms</span></div>
<div>Request Gap: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).gap_ms ?? 'N/A')} ms</span></div>
<div>Concurrency: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).concurrency ?? 'N/A')}</span></div>
<div>Max Files: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).max_analyze_files ?? 'N/A')}</span></div>
<div>Output Language: <span className="text-foreground">{String((llmTestResult.debug.saved_config as Record<string, unknown>).output_language ?? 'N/A')}</span></div>
</div>
</div>
)}
{/* Parameters actually used for the test */}
{llmTestResult.debug.test_params && (
<div className="mt-2 pt-2 border-t border-border/30">
<div className="font-bold text-emerald-400 mb-2">Test Params Used:</div>
<div className="grid grid-cols-3 gap-x-4">
<div>Temperature: <span className="text-foreground">{String((llmTestResult.debug.test_params as Record<string, unknown>).temperature ?? 'N/A')}</span></div>
<div>Timeout: <span className="text-foreground">{String((llmTestResult.debug.test_params as Record<string, unknown>).timeout ?? 'N/A')}s</span></div>
<div>MaxTokens: <span className="text-foreground">{String((llmTestResult.debug.test_params as Record<string, unknown>).max_tokens ?? 'N/A')}</span></div>
</div>
</div>
)}
{llmTestResult.debug.error_category && (
<div>Error Category: <span className="text-rose-400">{String(llmTestResult.debug.error_category)}</span></div>
<div className="mt-2">Error Category: <span className="text-rose-400">{String(llmTestResult.debug.error_category)}</span></div>
)}
{llmTestResult.debug.error_type && (
<div>Error Type: <span className="text-rose-400">{String(llmTestResult.debug.error_type)}</span>