feat: Improve report generation and statistics by normalizing severity comparisons and leveraging backend-calculated task metrics.
commit 2a332d6eda
parent d449e2ba78
@@ -185,7 +185,8 @@ class AgentFindingResponse(BaseModel):
     code_snippet: Optional[str]
 
     is_verified: bool
-    confidence: float
+    # 🔥 FIX: Map from ai_confidence in ORM, make Optional with default
+    confidence: Optional[float] = Field(default=0.5, validation_alias="ai_confidence")
     status: str
 
     suggestion: Optional[str] = None
@@ -193,8 +194,10 @@ class AgentFindingResponse(BaseModel):
 
     created_at: datetime
 
-    class Config:
-        from_attributes = True
+    model_config = {
+        "from_attributes": True,
+        "populate_by_name": True,  # Allow both 'confidence' and 'ai_confidence'
+    }
 
 
 class TaskSummaryResponse(BaseModel):
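Aside: a minimal, self-contained sketch of the Pydantic v2 behavior these two hunks rely on (model name and values are illustrative, not from the codebase). `validation_alias` lets the response model read the ORM's `ai_confidence` attribute, while `populate_by_name` keeps plain `confidence` working:

```python
# Illustrative sketch only; assumes Pydantic v2.
from typing import Optional
from pydantic import BaseModel, Field

class FindingSketch(BaseModel):
    confidence: Optional[float] = Field(default=0.5, validation_alias="ai_confidence")

    model_config = {
        "from_attributes": True,    # build from ORM objects via attribute access
        "populate_by_name": True,   # accept 'confidence' as well as 'ai_confidence'
    }

print(FindingSketch.model_validate({"ai_confidence": 0.9}).confidence)  # 0.9
print(FindingSketch.model_validate({"confidence": 0.7}).confidence)     # 0.7
print(FindingSketch().confidence)                                       # 0.5 (default)
```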
@@ -2201,6 +2204,16 @@ async def generate_audit_report(
     )
     findings = findings.scalars().all()
 
+    # 🔥 Helper function to normalize severity for comparison (case-insensitive)
+    def normalize_severity(sev: str) -> str:
+        return str(sev).lower().strip() if sev else ""
+
+    # Log findings for debugging
+    logger.info(f"[Report] Task {task_id}: Found {len(findings)} findings from database")
+    if findings:
+        for i, f in enumerate(findings[:3]):  # Log first 3
+            logger.debug(f"[Report] Finding {i+1}: severity='{f.severity}', title='{f.title[:50] if f.title else 'N/A'}'")
+
     if format == "json":
         # Enhanced JSON report with full metadata
         return {
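Aside: the helper's contract, as a standalone runnable sketch (duplicated here only for illustration):

```python
def normalize_severity(sev: str) -> str:
    # Lower-case and trim so 'Critical', ' HIGH ' and 'high' all compare equal;
    # None or empty input normalizes to "" and matches no severity bucket.
    return str(sev).lower().strip() if sev else ""

assert normalize_severity("Critical") == "critical"
assert normalize_severity("  HIGH ") == "high"
assert normalize_severity(None) == ""
```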
@@ -2218,10 +2231,10 @@ async def generate_audit_report(
             "total_findings": len(findings),
             "verified_findings": sum(1 for f in findings if f.is_verified),
             "severity_distribution": {
-                "critical": sum(1 for f in findings if f.severity == 'critical'),
-                "high": sum(1 for f in findings if f.severity == 'high'),
-                "medium": sum(1 for f in findings if f.severity == 'medium'),
-                "low": sum(1 for f in findings if f.severity == 'low'),
+                "critical": sum(1 for f in findings if normalize_severity(f.severity) == 'critical'),
+                "high": sum(1 for f in findings if normalize_severity(f.severity) == 'high'),
+                "medium": sum(1 for f in findings if normalize_severity(f.severity) == 'medium'),
+                "low": sum(1 for f in findings if normalize_severity(f.severity) == 'low'),
             },
             "agent_metrics": {
                 "total_iterations": task.total_iterations,
@@ -2258,10 +2271,10 @@ async def generate_audit_report(
     # Calculate statistics
     total = len(findings)
-    critical = sum(1 for f in findings if f.severity == 'critical')
-    high = sum(1 for f in findings if f.severity == 'high')
-    medium = sum(1 for f in findings if f.severity == 'medium')
-    low = sum(1 for f in findings if f.severity == 'low')
+    critical = sum(1 for f in findings if normalize_severity(f.severity) == 'critical')
+    high = sum(1 for f in findings if normalize_severity(f.severity) == 'high')
+    medium = sum(1 for f in findings if normalize_severity(f.severity) == 'medium')
+    low = sum(1 for f in findings if normalize_severity(f.severity) == 'low')
     verified = sum(1 for f in findings if f.is_verified)
     with_poc = sum(1 for f in findings if f.has_poc)
 
 
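Aside: the four generator expressions above each scan `findings` once. An equivalent single pass with `collections.Counter` (a sketch of a possible alternative, not what this commit does):

```python
from collections import Counter
from types import SimpleNamespace

def normalize_severity(sev: str) -> str:
    return str(sev).lower().strip() if sev else ""

# Stand-in findings; the real code uses the ORM rows queried above.
findings = [SimpleNamespace(severity=s) for s in ["Critical", "HIGH", "high", None]]

counts = Counter(normalize_severity(f.severity) for f in findings)
critical, high = counts["critical"], counts["high"]  # 1, 2
medium, low = counts["medium"], counts["low"]        # 0, 0 (missing keys default to 0)
```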
@@ -2323,13 +2336,13 @@ async def generate_audit_report(
     md_lines.append(f"| Severity | Count | Verified |")
     md_lines.append(f"|----------|-------|----------|")
     if critical > 0:
-        md_lines.append(f"| **CRITICAL** | {critical} | {sum(1 for f in findings if f.severity == 'critical' and f.is_verified)} |")
+        md_lines.append(f"| **CRITICAL** | {critical} | {sum(1 for f in findings if normalize_severity(f.severity) == 'critical' and f.is_verified)} |")
     if high > 0:
-        md_lines.append(f"| **HIGH** | {high} | {sum(1 for f in findings if f.severity == 'high' and f.is_verified)} |")
+        md_lines.append(f"| **HIGH** | {high} | {sum(1 for f in findings if normalize_severity(f.severity) == 'high' and f.is_verified)} |")
     if medium > 0:
-        md_lines.append(f"| **MEDIUM** | {medium} | {sum(1 for f in findings if f.severity == 'medium' and f.is_verified)} |")
+        md_lines.append(f"| **MEDIUM** | {medium} | {sum(1 for f in findings if normalize_severity(f.severity) == 'medium' and f.is_verified)} |")
     if low > 0:
-        md_lines.append(f"| **LOW** | {low} | {sum(1 for f in findings if f.severity == 'low' and f.is_verified)} |")
+        md_lines.append(f"| **LOW** | {low} | {sum(1 for f in findings if normalize_severity(f.severity) == 'low' and f.is_verified)} |")
     md_lines.append(f"| **Total** | {total} | {verified} |")
     md_lines.append("")
 
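Aside: a runnable sketch of the table these `append` calls build, with hypothetical counts:

```python
# Hypothetical counts, severity -> (count, verified); illustrative only.
rows = {"CRITICAL": (1, 1), "HIGH": (2, 1)}
md_lines = ["| Severity | Count | Verified |", "|----------|-------|----------|"]
for name, (count, verified) in rows.items():
    md_lines.append(f"| **{name}** | {count} | {verified} |")
total = sum(c for c, _ in rows.values())
total_verified = sum(v for _, v in rows.values())
md_lines.append(f"| **Total** | {total} | {total_verified} |")
print("\n".join(md_lines))
```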
@@ -2353,7 +2366,7 @@ async def generate_audit_report(
     else:
         # Group findings by severity
         for severity_level, severity_name in [('critical', 'Critical'), ('high', 'High'), ('medium', 'Medium'), ('low', 'Low')]:
-            severity_findings = [f for f in findings if f.severity == severity_level]
+            severity_findings = [f for f in findings if normalize_severity(f.severity) == severity_level]
             if not severity_findings:
                 continue
 
@@ -104,20 +104,18 @@ function formatBytes(bytes: number): string {
 
 // ============ Sub Components ============
 
-// Report stats summary
+// Report stats summary - uses task statistics for reliability
 const ReportStats = memo(function ReportStats({
   task,
-  findings,
 }: {
   task: AgentTask;
-  findings: AgentFinding[];
+  findings: AgentFinding[]; // Keep for API compatibility, but use task stats
 }) {
-  const severityCounts = {
-    critical: findings.filter(f => f.severity.toLowerCase() === "critical").length,
-    high: findings.filter(f => f.severity.toLowerCase() === "high").length,
-    medium: findings.filter(f => f.severity.toLowerCase() === "medium").length,
-    low: findings.filter(f => f.severity.toLowerCase() === "low").length,
-  };
+  // Use task's reliable statistics instead of computing from findings array
+  // This ensures consistency even when findings array is empty or not loaded
+  const totalFindings = task.findings_count || 0;
+  const criticalAndHigh = (task.critical_count || 0) + (task.high_count || 0);
+  const verified = task.verified_count || 0;
 
   return (
     <div className="grid grid-cols-4 gap-2 mb-4">
@@ -137,7 +135,7 @@ const ReportStats = memo(function ReportStats({
          <span className="text-[9px] text-slate-500 uppercase tracking-wider">Findings</span>
        </div>
        <div className="text-lg font-bold font-mono text-white">
-         {findings.length}
+         {totalFindings}
        </div>
      </div>
 
@@ -147,7 +145,7 @@ const ReportStats = memo(function ReportStats({
          <span className="text-[9px] text-slate-500 uppercase tracking-wider">Critical</span>
        </div>
        <div className="text-lg font-bold font-mono text-rose-400">
-         {severityCounts.critical + severityCounts.high}
+         {criticalAndHigh}
        </div>
      </div>
 
@@ -157,7 +155,7 @@ const ReportStats = memo(function ReportStats({
          <span className="text-[9px] text-slate-500 uppercase tracking-wider">Verified</span>
        </div>
        <div className="text-lg font-bold font-mono text-teal-400">
-         {findings.filter(f => f.is_verified).length}
+         {verified}
        </div>
      </div>
    </div>
@@ -351,49 +349,15 @@ export const ReportExportDialog = memo(function ReportExportDialog({
     setPreview(prev => ({ ...prev, loading: true, error: null }));
 
     try {
-      // For JSON, we can generate it client-side
+      // For JSON, fetch from backend API to ensure data consistency
+      // The backend properly queries findings from the database
       if (format === "json") {
-        const reportData = {
-          report_metadata: {
-            task_id: task.id,
-            project_id: task.project_id,
-            generated_at: new Date().toISOString(),
-            task_status: task.status,
-            duration_seconds: task.completed_at && task.started_at
-              ? Math.round((new Date(task.completed_at).getTime() - new Date(task.started_at).getTime()) / 1000)
-              : null,
-          },
-          summary: {
-            security_score: task.security_score,
-            total_files_analyzed: task.analyzed_files,
-            total_findings: findings.length,
-            verified_findings: findings.filter(f => f.is_verified).length,
-            severity_distribution: {
-              critical: task.critical_count,
-              high: task.high_count,
-              medium: task.medium_count,
-              low: task.low_count,
-            },
-          },
-          findings: findings.map(f => ({
-            id: f.id,
-            vulnerability_type: f.vulnerability_type,
-            severity: f.severity,
-            title: f.title,
-            description: f.description,
-            file_path: f.file_path,
-            line_start: f.line_start,
-            line_end: f.line_end,
-            code_snippet: f.code_snippet,
-            is_verified: f.is_verified,
-            has_poc: f.has_poc,
-            suggestion: f.suggestion,
-            ai_confidence: f.ai_confidence,
-          })),
-        };
+        const response = await apiClient.get(`/agent-tasks/${task.id}/report`, {
+          params: { format: "json" },
+        });
 
         setPreview({
-          content: JSON.stringify(reportData, null, 2),
+          content: JSON.stringify(response.data, null, 2),
           format: "json",
           loading: false,
           error: null,
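Aside: the frontend now defers JSON report generation to the backend endpoint. A smoke-test sketch of that endpoint from Python (the base URL, task id, and `httpx` dependency are assumptions; only the route shape is taken from the call above):

```python
# Assumes a locally running backend and the httpx package; task id is a placeholder.
import httpx

resp = httpx.get("http://localhost:8000/agent-tasks/123/report", params={"format": "json"})
resp.raise_for_status()
report = resp.json()
print(sorted(report))  # top-level keys of the backend-generated report
```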
@@ -8,7 +8,6 @@
 import { memo } from "react";
 import { Activity, FileCode, Repeat, Zap, Bug, Shield, AlertTriangle } from "lucide-react";
 import { Badge } from "@/components/ui/badge";
-import { calculateSeverityCounts } from "../utils";
 import type { StatsPanelProps } from "../types";
 
 // Circular progress component
@@ -86,8 +85,15 @@ function MetricCard({ icon, label, value, suffix = "", colorClass = "text-slate-
 export const StatsPanel = memo(function StatsPanel({ task, findings }: StatsPanelProps) {
   if (!task) return null;
 
-  const severityCounts = calculateSeverityCounts(findings);
-  const totalFindings = Object.values(severityCounts).reduce((a, b) => a + b, 0);
+  // 🔥 Use task's reliable statistics instead of computing from findings array
+  // This ensures consistency even when findings array is empty or not loaded
+  const severityCounts = {
+    critical: task.critical_count || 0,
+    high: task.high_count || 0,
+    medium: task.medium_count || 0,
+    low: task.low_count || 0,
+  };
+  const totalFindings = task.findings_count || 0;
   const progressPercent = task.progress_percentage || 0;
 
   // Determine score color
@@ -212,8 +218,7 @@ export const StatsPanel = memo(function StatsPanel({ task, findings }: StatsPane
              color={getScoreColor(task.security_score)}
            />
            <div className="absolute inset-0 flex items-center justify-center">
-             <span className={`text-sm font-bold font-mono ${
-               task.security_score >= 80 ? 'text-emerald-400' :
+             <span className={`text-sm font-bold font-mono ${task.security_score >= 80 ? 'text-emerald-400' :
                task.security_score >= 60 ? 'text-amber-400' :
                'text-rose-400'
              }`}>