feat: add encrypted storage for sensitive information
- Add an encryption.py encryption service based on Fernet symmetric encryption
- Store sensitive fields such as API keys and tokens encrypted in the database
- Decrypt automatically on read, staying compatible with unencrypted legacy data
- Refresh the frontend state automatically after the configuration is saved
This commit is contained in:
parent
bfef3b35a6
commit
f640bfbaba
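A minimal standalone sketch of the scheme used below, assuming the cryptography package is installed; the literal secret is only a stand-in for the application's settings.SECRET_KEY:

import base64
import hashlib
from cryptography.fernet import Fernet

# Derive a 32-byte Fernet key from the app secret via SHA-256, as the new EncryptionService does
secret_key = "example-secret-key"  # stand-in for settings.SECRET_KEY
fernet = Fernet(base64.urlsafe_b64encode(hashlib.sha256(secret_key.encode()).digest()))

token = fernet.encrypt(b"sk-example-api-key")          # ciphertext stored in the database
assert fernet.decrypt(token) == b"sk-example-api-key"  # plaintext returned on read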
@@ -14,9 +14,36 @@ from app.db.session import get_db
from app.models.user_config import UserConfig
from app.models.user import User
from app.core.config import settings
from app.core.encryption import encrypt_sensitive_data, decrypt_sensitive_data

router = APIRouter()

# Sensitive fields that must be stored encrypted
SENSITIVE_LLM_FIELDS = [
    'llmApiKey', 'geminiApiKey', 'openaiApiKey', 'claudeApiKey',
    'qwenApiKey', 'deepseekApiKey', 'zhipuApiKey', 'moonshotApiKey',
    'baiduApiKey', 'minimaxApiKey', 'doubaoApiKey'
]
SENSITIVE_OTHER_FIELDS = ['githubToken', 'gitlabToken']


def encrypt_config(config: dict, sensitive_fields: list) -> dict:
    """Encrypt the sensitive fields of a config dict."""
    encrypted = config.copy()
    for field in sensitive_fields:
        if field in encrypted and encrypted[field]:
            encrypted[field] = encrypt_sensitive_data(encrypted[field])
    return encrypted


def decrypt_config(config: dict, sensitive_fields: list) -> dict:
    """Decrypt the sensitive fields of a config dict."""
    decrypted = config.copy()
    for field in sensitive_fields:
        if field in decrypted and decrypted[field]:
            decrypted[field] = decrypt_sensitive_data(decrypted[field])
    return decrypted


class LLMConfigSchema(BaseModel):
    """Schema for the LLM configuration"""
@@ -128,6 +155,7 @@ async def get_my_config(
    default_config = get_default_config()

    if not config:
        print(f"[Config] User {current_user.id} has no saved config, returning defaults")
        # Return the system default configuration
        return UserConfigResponse(
            id="",
@@ -141,6 +169,15 @@ async def get_my_config(
    user_llm_config = json.loads(config.llm_config) if config.llm_config else {}
    user_other_config = json.loads(config.other_config) if config.other_config else {}

    # Decrypt sensitive fields
    user_llm_config = decrypt_config(user_llm_config, SENSITIVE_LLM_FIELDS)
    user_other_config = decrypt_config(user_other_config, SENSITIVE_OTHER_FIELDS)

    print(f"[Config] Saved config for user {current_user.id}:")
    print(f"  - llmProvider: {user_llm_config.get('llmProvider')}")
    print(f"  - llmApiKey: {'***' + user_llm_config.get('llmApiKey', '')[-4:] if user_llm_config.get('llmApiKey') else '(empty)'}")
    print(f"  - llmModel: {user_llm_config.get('llmModel')}")

    merged_llm_config = {**default_config["llmConfig"], **user_llm_config}
    merged_other_config = {**default_config["otherConfig"], **user_other_config}
@@ -166,34 +203,58 @@ async def update_my_config(
    )
    config = result.scalar_one_or_none()

    # Prepare the config data to save (sensitive fields get encrypted)
    llm_data = config_in.llmConfig.dict(exclude_none=True) if config_in.llmConfig else {}
    other_data = config_in.otherConfig.dict(exclude_none=True) if config_in.otherConfig else {}

    # Encrypt sensitive fields
    llm_data_encrypted = encrypt_config(llm_data, SENSITIVE_LLM_FIELDS)
    other_data_encrypted = encrypt_config(other_data, SENSITIVE_OTHER_FIELDS)

    if not config:
        # Create a new config
        config = UserConfig(
            user_id=current_user.id,
            llm_config=json.dumps(config_in.llmConfig.dict(exclude_none=True) if config_in.llmConfig else {}),
            other_config=json.dumps(config_in.otherConfig.dict(exclude_none=True) if config_in.otherConfig else {}),
            llm_config=json.dumps(llm_data_encrypted),
            other_config=json.dumps(other_data_encrypted),
        )
        db.add(config)
    else:
        # Update the existing config
        if config_in.llmConfig:
            existing_llm = json.loads(config.llm_config) if config.llm_config else {}
            existing_llm.update(config_in.llmConfig.dict(exclude_none=True))
            config.llm_config = json.dumps(existing_llm)
            # Decrypt the existing data first, merge in the new data, then re-encrypt
            existing_llm = decrypt_config(existing_llm, SENSITIVE_LLM_FIELDS)
            existing_llm.update(llm_data)  # merge using the unencrypted new data
            config.llm_config = json.dumps(encrypt_config(existing_llm, SENSITIVE_LLM_FIELDS))

        if config_in.otherConfig:
            existing_other = json.loads(config.other_config) if config.other_config else {}
            existing_other.update(config_in.otherConfig.dict(exclude_none=True))
            config.other_config = json.dumps(existing_other)
            # Decrypt the existing data first, merge in the new data, then re-encrypt
            existing_other = decrypt_config(existing_other, SENSITIVE_OTHER_FIELDS)
            existing_other.update(other_data)  # merge using the unencrypted new data
            config.other_config = json.dumps(encrypt_config(existing_other, SENSITIVE_OTHER_FIELDS))

    await db.commit()
    await db.refresh(config)

    # Fetch the system defaults and merge (consistent with get_my_config)
    default_config = get_default_config()
    user_llm_config = json.loads(config.llm_config) if config.llm_config else {}
    user_other_config = json.loads(config.other_config) if config.other_config else {}

    # Decrypt before returning to the frontend
    user_llm_config = decrypt_config(user_llm_config, SENSITIVE_LLM_FIELDS)
    user_other_config = decrypt_config(user_other_config, SENSITIVE_OTHER_FIELDS)

    merged_llm_config = {**default_config["llmConfig"], **user_llm_config}
    merged_other_config = {**default_config["otherConfig"], **user_other_config}

    return UserConfigResponse(
        id=config.id,
        user_id=config.user_id,
        llmConfig=json.loads(config.llm_config) if config.llm_config else {},
        otherConfig=json.loads(config.other_config) if config.other_config else {},
        llmConfig=merged_llm_config,
        otherConfig=merged_other_config,
        created_at=config.created_at.isoformat() if config.created_at else "",
        updated_at=config.updated_at.isoformat() if config.updated_at else None,
    )
@@ -0,0 +1,101 @@
"""
Encryption service for sensitive information.
Uses the Fernet symmetric encryption algorithm to encrypt API keys and other secrets.
"""

import base64
import hashlib
from typing import Optional
from cryptography.fernet import Fernet
from app.core.config import settings


class EncryptionService:
    """Encryption service - encrypts and decrypts sensitive information."""

    _instance: Optional['EncryptionService'] = None
    _fernet: Optional[Fernet] = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance._init_fernet()
        return cls._instance

    def _init_fernet(self):
        """Initialize the Fernet cipher with a key derived from SECRET_KEY."""
        # Hash SECRET_KEY with SHA-256 to obtain a 32-byte key
        key_bytes = hashlib.sha256(settings.SECRET_KEY.encode()).digest()
        # Fernet expects a base64-encoded 32-byte key
        fernet_key = base64.urlsafe_b64encode(key_bytes)
        self._fernet = Fernet(fernet_key)

    def encrypt(self, plaintext: str) -> str:
        """
        Encrypt a plaintext string.

        Args:
            plaintext: the plaintext to encrypt

        Returns:
            The encrypted ciphertext (base64-encoded)
        """
        if not plaintext:
            return ""

        encrypted = self._fernet.encrypt(plaintext.encode('utf-8'))
        return encrypted.decode('utf-8')

    def decrypt(self, ciphertext: str) -> str:
        """
        Decrypt a ciphertext string.

        Args:
            ciphertext: the ciphertext to decrypt (base64-encoded)

        Returns:
            The decrypted plaintext
        """
        if not ciphertext:
            return ""

        try:
            decrypted = self._fernet.decrypt(ciphertext.encode('utf-8'))
            return decrypted.decode('utf-8')
        except Exception:
            # Decryption failed - probably unencrypted legacy data, return the value as-is
            return ciphertext

    def is_encrypted(self, value: str) -> bool:
        """
        Check whether a value is encrypted.

        Args:
            value: the value to check

        Returns:
            True if the value is encrypted
        """
        if not value:
            return False

        try:
            # If decryption succeeds, the value is encrypted
            self._fernet.decrypt(value.encode('utf-8'))
            return True
        except Exception:
            return False


# Global encryption service instance
encryption_service = EncryptionService()


def encrypt_sensitive_data(data: str) -> str:
    """Convenience helper to encrypt sensitive data."""
    return encryption_service.encrypt(data)


def decrypt_sensitive_data(data: str) -> str:
    """Convenience helper to decrypt sensitive data."""
    return encryption_service.decrypt(data)
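A hypothetical sanity check for the helpers above (not part of this commit), assuming app.core.encryption is importable; it shows the round trip and the fallback for unencrypted legacy values:

from app.core.encryption import encrypt_sensitive_data, decrypt_sensitive_data

token = encrypt_sensitive_data("sk-example")            # ciphertext written to the database
assert decrypt_sensitive_data(token) == "sk-example"    # decrypted transparently on read
# A legacy plaintext value fails Fernet decryption, so decrypt() returns it unchanged
assert decrypt_sensitive_data("legacy-plain-value") == "legacy-plain-value"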
@@ -52,25 +52,52 @@ export function SystemConfig() {
  const loadConfig = async () => {
    try {
      setLoading(true);
      const defaultConfig = await api.getDefaultConfig();
      console.log('[SystemConfig] Loading config...');

      // The backend /config/me already returns the merged config (user config first, then system defaults)
      const backendConfig = await api.getUserConfig();

      const merged: SystemConfigData = {
        llmProvider: backendConfig?.llmConfig?.llmProvider || defaultConfig?.llmConfig?.llmProvider || 'openai',
        llmApiKey: backendConfig?.llmConfig?.llmApiKey || '',
        llmModel: backendConfig?.llmConfig?.llmModel || '',
        llmBaseUrl: backendConfig?.llmConfig?.llmBaseUrl || '',
        llmTimeout: backendConfig?.llmConfig?.llmTimeout || defaultConfig?.llmConfig?.llmTimeout || 150000,
        llmTemperature: backendConfig?.llmConfig?.llmTemperature ?? defaultConfig?.llmConfig?.llmTemperature ?? 0.1,
        llmMaxTokens: backendConfig?.llmConfig?.llmMaxTokens || defaultConfig?.llmConfig?.llmMaxTokens || 4096,
        githubToken: backendConfig?.otherConfig?.githubToken || '',
        gitlabToken: backendConfig?.otherConfig?.gitlabToken || '',
        maxAnalyzeFiles: backendConfig?.otherConfig?.maxAnalyzeFiles || defaultConfig?.otherConfig?.maxAnalyzeFiles || 50,
        llmConcurrency: backendConfig?.otherConfig?.llmConcurrency || defaultConfig?.otherConfig?.llmConcurrency || 3,
        llmGapMs: backendConfig?.otherConfig?.llmGapMs || defaultConfig?.otherConfig?.llmGapMs || 2000,
        outputLanguage: backendConfig?.otherConfig?.outputLanguage || defaultConfig?.otherConfig?.outputLanguage || 'zh-CN',
      };
      setConfig(merged);
      console.log('[SystemConfig] Raw data returned by the backend:', JSON.stringify(backendConfig, null, 2));

      if (backendConfig) {
        // Use the merged config returned by the backend directly
        const llmConfig = backendConfig.llmConfig || {};
        const otherConfig = backendConfig.otherConfig || {};

        const newConfig = {
          llmProvider: llmConfig.llmProvider || 'openai',
          llmApiKey: llmConfig.llmApiKey || '',
          llmModel: llmConfig.llmModel || '',
          llmBaseUrl: llmConfig.llmBaseUrl || '',
          llmTimeout: llmConfig.llmTimeout || 150000,
          llmTemperature: llmConfig.llmTemperature ?? 0.1,
          llmMaxTokens: llmConfig.llmMaxTokens || 4096,
          githubToken: otherConfig.githubToken || '',
          gitlabToken: otherConfig.gitlabToken || '',
          maxAnalyzeFiles: otherConfig.maxAnalyzeFiles || 50,
          llmConcurrency: otherConfig.llmConcurrency || 3,
          llmGapMs: otherConfig.llmGapMs || 2000,
          outputLanguage: otherConfig.outputLanguage || 'zh-CN',
        };

        console.log('[SystemConfig] Parsed config:', newConfig);
        setConfig(newConfig);

        console.log('✓ Config loaded:', {
          provider: llmConfig.llmProvider,
          hasApiKey: !!llmConfig.llmApiKey,
          model: llmConfig.llmModel,
        });
      } else {
        console.warn('[SystemConfig] Backend returned no data, using defaults');
        // Fall back to the default values if loading fails
        setConfig({
          llmProvider: 'openai', llmApiKey: '', llmModel: '', llmBaseUrl: '',
          llmTimeout: 150000, llmTemperature: 0.1, llmMaxTokens: 4096,
          githubToken: '', gitlabToken: '',
          maxAnalyzeFiles: 50, llmConcurrency: 3, llmGapMs: 2000, outputLanguage: 'zh-CN',
        });
      }
    } catch (error) {
      console.error('Failed to load config:', error);
      setConfig({
@@ -87,7 +114,7 @@ export function SystemConfig() {
  const saveConfig = async () => {
    if (!config) return;
    try {
      await api.updateUserConfig({
      const savedConfig = await api.updateUserConfig({
        llmConfig: {
          llmProvider: config.llmProvider, llmApiKey: config.llmApiKey,
          llmModel: config.llmModel, llmBaseUrl: config.llmBaseUrl,
@@ -100,6 +127,28 @@ export function SystemConfig() {
          llmGapMs: config.llmGapMs, outputLanguage: config.outputLanguage,
        },
      });

      // Update local state with the data returned by the backend to keep it in sync
      if (savedConfig) {
        const llmConfig = savedConfig.llmConfig || {};
        const otherConfig = savedConfig.otherConfig || {};
        setConfig({
          llmProvider: llmConfig.llmProvider || config.llmProvider,
          llmApiKey: llmConfig.llmApiKey || '',
          llmModel: llmConfig.llmModel || '',
          llmBaseUrl: llmConfig.llmBaseUrl || '',
          llmTimeout: llmConfig.llmTimeout || 150000,
          llmTemperature: llmConfig.llmTemperature ?? 0.1,
          llmMaxTokens: llmConfig.llmMaxTokens || 4096,
          githubToken: otherConfig.githubToken || '',
          gitlabToken: otherConfig.gitlabToken || '',
          maxAnalyzeFiles: otherConfig.maxAnalyzeFiles || 50,
          llmConcurrency: otherConfig.llmConcurrency || 3,
          llmGapMs: otherConfig.llmGapMs || 2000,
          outputLanguage: otherConfig.outputLanguage || 'zh-CN',
        });
      }

      setHasChanges(false);
      toast.success("Configuration saved!");
    } catch (error) {
@@ -248,8 +248,14 @@ export const api = {
  } | null> {
    try {
      const res = await apiClient.get('/config/me');
      console.log('[API] getUserConfig succeeded:', {
        hasLlmConfig: !!res.data?.llmConfig,
        hasApiKey: !!res.data?.llmConfig?.llmApiKey,
        provider: res.data?.llmConfig?.llmProvider,
      });
      return res.data;
    } catch (e) {
    } catch (e: any) {
      console.error('[API] getUserConfig failed:', e?.response?.status, e?.message);
      return null;
    }
  },