Restore the CI history deletion feature
parent e537010dfd
commit 83a9a85382
@@ -33,7 +33,7 @@ class PRReview(Base):
     created_at = Column(DateTime(timezone=True), server_default=func.now())

     # Relationships
-    project = relationship("Project", backref="pr_reviews")
+    project = relationship("Project", back_populates="pr_reviews")

     def __repr__(self):
         return f"<PRReview pr={self.pr_number} event={self.event_type}>"
@@ -36,6 +36,7 @@ class Project(Base):
     members = relationship("ProjectMember", back_populates="project", cascade="all, delete-orphan")
     tasks = relationship("AuditTask", back_populates="project", cascade="all, delete-orphan")
     agent_tasks = relationship("AgentTask", back_populates="project", cascade="all, delete-orphan")
+    pr_reviews = relationship("PRReview", back_populates="project", cascade="all, delete-orphan")

 class ProjectMember(Base):
     __tablename__ = "project_members"
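Taken together, these two hunks replace the implicit backref on PRReview.project with an explicit back_populates pair declared on both models. A minimal, self-contained sketch of that pattern, with table and column names chosen for illustration rather than taken from this repository:

from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()

class Project(Base):
    __tablename__ = "projects"
    id = Column(Integer, primary_key=True)
    # Each side names the attribute on the other side explicitly.
    pr_reviews = relationship("PRReview", back_populates="project",
                              cascade="all, delete-orphan")

class PRReview(Base):
    __tablename__ = "pr_reviews"
    id = Column(Integer, primary_key=True)
    project_id = Column(Integer, ForeignKey("projects.id"))
    project = relationship("Project", back_populates="pr_reviews")

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    project = Project()
    review = PRReview()
    project.pr_reviews.append(review)
    assert review.project is project  # back_populates keeps both sides in sync
    session.add(project)
    session.commit()

Unlike backref, which creates the reverse attribute implicitly, back_populates makes the Project.pr_reviews collection and its delete-orphan cascade visible where the Project model is defined.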
@@ -632,11 +632,17 @@ async def scan_repo_task(task_id: str, db_session_factory, user_config: dict = N
             if not content or not content.strip():
                 print(f"⚠️ File content is empty, skipping: {file_info['path']}")
                 skipped_files += 1
+                # Update the total file count: the skipped file should not be counted
+                task.total_files = max(0, task.total_files - 1)
+                await db.commit()
                 continue

             if len(content) > settings.MAX_FILE_SIZE_BYTES:
                 print(f"⚠️ File is too large, skipping: {file_info['path']}")
                 skipped_files += 1
+                # Update the total file count since this file was skipped
+                task.total_files = max(0, task.total_files - 1)
+                await db.commit()
                 continue

             file_lines = content.split('\n')
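The lines added here remove files skipped as empty or oversized from the progress denominator, so the percentage is computed only over files that will actually be analyzed. A rough standalone sketch of that bookkeeping; the counter names mirror the diff, but the helper itself is hypothetical:

def account_for_skip(total_files: int, skipped_files: int) -> tuple[int, int]:
    """Drop a skipped file from the denominator and record the skip.

    total_files never goes below zero, and skipped files are counted
    separately so they are not reported as analyzed.
    """
    return max(0, total_files - 1), skipped_files + 1

total_files, skipped_files = 3, 0
total_files, skipped_files = account_for_skip(total_files, skipped_files)
assert (total_files, skipped_files) == (2, 1)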
@@ -711,13 +717,14 @@ async def scan_repo_task(task_id: str, db_session_factory, user_config: dict = N
             consecutive_failures = 0  # Reset after a success
             scanned_files += 1

-            # Update progress
-            task.scanned_files = scanned_files
+            # Update progress (successes + failures)
+            processed_count = scanned_files + failed_files
+            task.scanned_files = processed_count
             task.total_lines = total_lines
             task.issues_count = total_issues
             await db.commit()

-            print(f"📈 Task {task_id}: progress {scanned_files}/{len(files)} ({int(scanned_files/len(files)*100)}%)")
+            print(f"📈 Task {task_id}: progress {processed_count}/{task.total_files} ({int(processed_count/task.total_files*100) if task.total_files > 0 else 0}%)")

             # Delay between requests
             await asyncio.sleep(llm_gap_ms / 1000)
@@ -725,6 +732,10 @@ async def scan_repo_task(task_id: str, db_session_factory, user_config: dict = N
         except Exception as file_error:
             failed_files += 1
             consecutive_failures += 1
+            # Update progress on failures as well
+            task.scanned_files = scanned_files + failed_files
+            await db.commit()
+
             # Print detailed error information
             import traceback
             print(f"❌ File analysis failed ({file_info['path']}): {file_error}")
@@ -763,12 +774,18 @@ async def scan_repo_task(task_id: str, db_session_factory, user_config: dict = N
         else:
             task.status = "completed"
             task.completed_at = datetime.now(timezone.utc)
+            # The final scanned-file count shown is the number of successfully analyzed files
+            task.scanned_files = scanned_files
             task.total_lines = total_lines
             task.issues_count = total_issues
             task.quality_score = avg_quality_score
             await db.commit()
-            print(f"✅ Task {task_id} completed: scanned {scanned_files} files, found {total_issues} issues, quality score {avg_quality_score:.1f}")

+            result_msg = f"✅ Task {task_id} completed: successfully analyzed {scanned_files} files"
+            if failed_files > 0:
+                result_msg += f", {failed_files} files failed"
+            result_msg += f", found {total_issues} issues, quality score {avg_quality_score:.1f}"
+            print(result_msg)
         task_control.cleanup_task(task_id)

     except Exception as e:
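The progress hunks report successes plus failures against the (possibly reduced) total, and guard the percentage against a zero denominator. A small sketch of that calculation, with illustrative names not taken from the codebase:

def progress_message(task_id: str, scanned: int, failed: int, total: int) -> str:
    """Format a progress line the way the diff above computes it."""
    processed = scanned + failed
    percent = int(processed / total * 100) if total > 0 else 0
    return f"📈 Task {task_id}: progress {processed}/{total} ({percent}%)"

# Example: 3 successes and 1 failure out of 5 remaining files.
print(progress_message("demo", scanned=3, failed=1, total=5))  # ... 4/5 (80%)
# A run where every file was skipped never divides by zero.
print(progress_message("demo", scanned=0, failed=0, total=0))  # ... 0/0 (0%)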