feat: 将最大分析文件数默认值改为0表示无限制

修改前后端配置文件和文档,将 MAX_ANALYZE_FILES 默认值从50改为0表示无限制
同时更新相关逻辑判断条件,仅在 MAX_ANALYZE_FILES > 0 时进行文件数限制
This commit is contained in:
lintsinghua 2025-12-16 13:04:09 +08:00
parent 323b4ac8e3
commit 15743e0b18
10 changed files with 18 additions and 16 deletions

View File

@@ -101,7 +101,7 @@ async def process_zip_task(task_id: str, file_path: str, db_session_factory, use
normalized_targets = {normalize_path(p) for p in target_files}
print(f"🎯 ZIP任务: 指定分析 {len(normalized_targets)} 个文件")
files_to_scan = [f for f in files_to_scan if f['path'] in normalized_targets]
else:
elif settings.MAX_ANALYZE_FILES > 0:
files_to_scan = files_to_scan[:settings.MAX_ANALYZE_FILES]
task.total_files = len(files_to_scan)

View File

@@ -66,7 +66,7 @@ class Settings(BaseSettings):
GITLAB_TOKEN: Optional[str] = None
# 扫描配置
MAX_ANALYZE_FILES: int = 50 # 最大分析文件数
MAX_ANALYZE_FILES: int = 0 # 最大分析文件数,0表示无限制
MAX_FILE_SIZE_BYTES: int = 200 * 1024 # 最大文件大小 200KB
LLM_CONCURRENCY: int = 3 # LLM并发数
LLM_GAP_MS: int = 2000 # LLM请求间隔(毫秒)

View File

@@ -350,7 +350,7 @@ async def scan_repo_task(task_id: str, db_session_factory, user_config: dict = N
if target_files:
print(f"🎯 指定分析 {len(target_files)} 个文件")
files = [f for f in files if f['path'] in target_files]
else:
elif settings.MAX_ANALYZE_FILES > 0:
files = files[:settings.MAX_ANALYZE_FILES]
task.total_files = len(files)

View File

@@ -187,7 +187,7 @@ GITLAB_TOKEN=
# 扫描配置
# =============================================
# 单次扫描最大文件数
MAX_ANALYZE_FILES=50
MAX_ANALYZE_FILES=0
# 单文件最大大小(字节),默认 200KB
MAX_FILE_SIZE_BYTES=204800

View File

@@ -41,6 +41,7 @@ services:
- ALL_PROXY=
restart: unless-stopped
volumes:
- ./backend/app:/app/app:ro # 挂载代码目录,修改后自动生效
- backend_uploads:/app/uploads
- /var/run/docker.sock:/var/run/docker.sock # 沙箱执行必须
ports:
@@ -63,7 +64,8 @@ services:
condition: service_healthy
redis:
condition: service_healthy
command: sh -c ".venv/bin/alembic upgrade head && .venv/bin/uvicorn app.main:app --host 0.0.0.0 --port 8000"
# 开发模式:启用 --reload 热重载
command: sh -c ".venv/bin/alembic upgrade head && .venv/bin/uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload"
networks:
- deepaudit-network

View File

@@ -82,7 +82,7 @@ GITHUB_TOKEN= # GitHub Personal Access Token
GITLAB_TOKEN= # GitLab Personal Access Token
# ========== 扫描配置 ==========
MAX_ANALYZE_FILES=50 # 单次扫描最大文件数
MAX_ANALYZE_FILES=0 # 单次扫描最大文件数,0表示无限制
MAX_FILE_SIZE_BYTES=204800 # 单文件最大大小(字节),默认 200KB
LLM_CONCURRENCY=3 # LLM 并发请求数
LLM_GAP_MS=2000 # 请求间隔(毫秒),避免限流
@@ -164,7 +164,7 @@ VITE_API_BASE_URL=/api # 后端 API 地址
VITE_APP_ID=deepaudit
# ========== 代码分析配置 ==========
VITE_MAX_ANALYZE_FILES=40 # 最大分析文件数
VITE_MAX_ANALYZE_FILES=0 # 最大分析文件数,0表示无限制
VITE_LLM_CONCURRENCY=2 # LLM 并发数
VITE_LLM_GAP_MS=500 # 请求间隔(毫秒)
VITE_OUTPUT_LANGUAGE=zh-CN # 输出语言
@@ -175,7 +175,7 @@ VITE_OUTPUT_LANGUAGE=zh-CN # 输出语言
| 配置项 | 说明 | 默认值 |
|--------|------|--------|
| `VITE_API_BASE_URL` | 后端 API 地址,Docker 部署时使用 `/api` | `/api` |
| `VITE_MAX_ANALYZE_FILES` | 单次扫描最大文件数 | `40` |
| `VITE_MAX_ANALYZE_FILES` | 单次扫描最大文件数,0表示无限制 | `0` |
| `VITE_LLM_CONCURRENCY` | 前端 LLM 并发请求数 | `2` |
| `VITE_LLM_GAP_MS` | 前端请求间隔 | `500` |
| `VITE_OUTPUT_LANGUAGE` | 分析结果输出语言 | `zh-CN` |

View File

@@ -335,10 +335,10 @@ LLM_CONCURRENCY=5 # 增加并发(注意 API 限流)
LLM_GAP_MS=500 # 减少请求间隔
```
**2. 限制分析文件数**
**2. 限制分析文件数**(默认无限制)
```env
MAX_ANALYZE_FILES=30 # 减少单次分析文件数
MAX_ANALYZE_FILES=30 # 设置单次分析文件数限制
```
**3. 使用更快的模型**

View File

@@ -41,7 +41,7 @@ VITE_APP_ID=deepaudit
# 代码分析配置
# =============================================
# 单次扫描最大文件数
VITE_MAX_ANALYZE_FILES=40
VITE_MAX_ANALYZE_FILES=0
# LLM 并发请求数
VITE_LLM_CONCURRENCY=2

View File

@@ -78,7 +78,7 @@ export function SystemConfig() {
llmMaxTokens: llmConfig.llmMaxTokens || 4096,
githubToken: otherConfig.githubToken || '',
gitlabToken: otherConfig.gitlabToken || '',
maxAnalyzeFiles: otherConfig.maxAnalyzeFiles || 50,
maxAnalyzeFiles: otherConfig.maxAnalyzeFiles ?? 0,
llmConcurrency: otherConfig.llmConcurrency || 3,
llmGapMs: otherConfig.llmGapMs || 2000,
outputLanguage: otherConfig.outputLanguage || 'zh-CN',
@@ -98,7 +98,7 @@ export function SystemConfig() {
llmProvider: 'openai', llmApiKey: '', llmModel: '', llmBaseUrl: '',
llmTimeout: 150000, llmTemperature: 0.1, llmMaxTokens: 4096,
githubToken: '', gitlabToken: '',
maxAnalyzeFiles: 50, llmConcurrency: 3, llmGapMs: 2000, outputLanguage: 'zh-CN',
maxAnalyzeFiles: 0, llmConcurrency: 3, llmGapMs: 2000, outputLanguage: 'zh-CN',
});
}
} catch (error) {
@@ -107,7 +107,7 @@ export function SystemConfig() {
llmProvider: 'openai', llmApiKey: '', llmModel: '', llmBaseUrl: '',
llmTimeout: 150000, llmTemperature: 0.1, llmMaxTokens: 4096,
githubToken: '', gitlabToken: '',
maxAnalyzeFiles: 50, llmConcurrency: 3, llmGapMs: 2000, outputLanguage: 'zh-CN',
maxAnalyzeFiles: 0, llmConcurrency: 3, llmGapMs: 2000, outputLanguage: 'zh-CN',
});
} finally {
setLoading(false);
@@ -144,7 +144,7 @@ export function SystemConfig() {
llmMaxTokens: llmConfig.llmMaxTokens || 4096,
githubToken: otherConfig.githubToken || '',
gitlabToken: otherConfig.gitlabToken || '',
maxAnalyzeFiles: otherConfig.maxAnalyzeFiles || 50,
maxAnalyzeFiles: otherConfig.maxAnalyzeFiles ?? 0,
llmConcurrency: otherConfig.llmConcurrency || 3,
llmGapMs: otherConfig.llmGapMs || 2000,
outputLanguage: otherConfig.outputLanguage || 'zh-CN',

View File

@@ -79,7 +79,7 @@ export const ANALYSIS_DEPTH = {
// 默认配置(与后端对齐)
export const DEFAULT_CONFIG = {
MAX_FILE_SIZE: 200 * 1024, // 200KB (对齐后端 MAX_FILE_SIZE_BYTES)
MAX_FILES_PER_SCAN: 50, // 对齐后端 MAX_ANALYZE_FILES
MAX_FILES_PER_SCAN: 0, // 对齐后端 MAX_ANALYZE_FILES,0表示无限制
ANALYSIS_TIMEOUT: 30000, // 30秒
DEBOUNCE_DELAY: 300, // 300ms
} as const;