From 7d29fe0f2aa2bcf818638c6006061ca60b980b5c Mon Sep 17 00:00:00 2001
From: lintsinghua
Date: Mon, 15 Dec 2025 15:45:16 +0800
Subject: [PATCH] fix: disable proxy environment variables in
 docker-compose.yml and normalize LiteLLM model prefixes to ensure external
 API connectivity

---
 .../services/llm/adapters/litellm_adapter.py | 40 +++++++++++++++++--
 docker-compose.yml                           |  6 +++
 2 files changed, 43 insertions(+), 3 deletions(-)

diff --git a/backend/app/services/llm/adapters/litellm_adapter.py b/backend/app/services/llm/adapters/litellm_adapter.py
index af3f4fa..c0821cb 100644
--- a/backend/app/services/llm/adapters/litellm_adapter.py
+++ b/backend/app/services/llm/adapters/litellm_adapter.py
@@ -66,13 +66,43 @@ class LiteLLMAdapter(BaseLLMAdapter):
         self._api_base = self._get_api_base()
 
     def _get_litellm_model(self) -> str:
-        """Get the model name in LiteLLM format"""
+        """Get the model name in LiteLLM format
+
+        For third-party OpenAI-compatible APIs (e.g. SiliconFlow):
+        - if the user sets a custom base_url and the model name contains / (e.g. Qwen/Qwen3-8B),
+        - the name must be rewritten as openai/Qwen/Qwen3-8B,
+        - because openai is the only prefix LiteLLM accepts for such endpoints.
+        """
         provider = self.config.provider
         model = self.config.model
 
         # Check whether the model name already carries a prefix
         if "/" in model:
-            return model
+            # Take the first segment as a candidate provider prefix
+            prefix_part = model.split("/")[0].lower()
+
+            # Provider prefixes that LiteLLM recognizes
+            valid_litellm_prefixes = [
+                "openai", "anthropic", "gemini", "deepseek", "ollama",
+                "azure", "huggingface", "together", "groq", "mistral",
+                "anyscale", "replicate", "bedrock", "vertex_ai", "cohere",
+                "sagemaker", "palm", "ai21", "nlp_cloud", "aleph_alpha",
+                "petals", "baseten", "vllm", "cloudflare", "xinference"
+            ]
+
+            # If the prefix is one LiteLLM knows, return the name unchanged
+            if prefix_part in valid_litellm_prefixes:
+                return model
+
+            # With a custom base_url, treat the endpoint as an OpenAI-compatible API
+            # (e.g. SiliconFlow uses model names like "Qwen/Qwen3-8B")
+            if self.config.base_url:
+                logger.debug(f"Custom base_url set; treating model {model} as OpenAI-compatible")
+                return f"openai/{model}"
+
+            # Without a custom base_url, fall back to the provider's prefix
+            prefix = self.PROVIDER_PREFIX_MAP.get(provider, "openai")
+            return f"{prefix}/{model}"
 
         # Look up the provider prefix
         prefix = self.PROVIDER_PREFIX_MAP.get(provider, "openai")
@@ -107,6 +137,10 @@ class LiteLLMAdapter(BaseLLMAdapter):
         """Send the request to LiteLLM"""
         import litellm
 
+        # LiteLLM debug mode prints far more detailed error information;
+        # uncomment the next line to enable it
+        # litellm._turn_on_debug()
+
         # Disable LiteLLM's cache so every request actually hits the API
         litellm.cache = None
 
@@ -152,7 +186,7 @@ class LiteLLMAdapter(BaseLLMAdapter):
         # Set the API base URL
         if self._api_base:
             kwargs["api_base"] = self._api_base
-            print(f"🔗 Using custom API base: {self._api_base}")
+            logger.debug(f"🔗 Using custom API base: {self._api_base}")
 
         # Set the request timeout
         kwargs["timeout"] = self.config.timeout

diff --git a/docker-compose.yml b/docker-compose.yml
index fed0ae2..769add8 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -45,6 +45,12 @@ services:
       - REDIS_URL=redis://redis:6379/0
       - AGENT_ENABLED=true
       - SANDBOX_ENABLED=true
+      # Clear proxy settings so the container can reach external APIs
+      - HTTP_PROXY=
+      - HTTPS_PROXY=
+      - http_proxy=
+      - https_proxy=
+      - NO_PROXY=*
     depends_on:
       db:
         condition: service_healthy
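
---

Note on the prefix normalization: the sketch below is a minimal, standalone
restatement of the branching added in _get_litellm_model(). The function name,
the trimmed prefix set, the prefix_map argument, and the SiliconFlow base URL
are illustrative stand-ins, not the project's real identifiers; only the
decision order mirrors the patch.

    # Hypothetical standalone version of the routing rule (Python 3.10+).
    KNOWN_PREFIXES = {"openai", "anthropic", "gemini", "deepseek", "ollama"}

    def to_litellm_model(model: str, provider: str, base_url: str | None,
                         prefix_map: dict[str, str]) -> str:
        if "/" not in model:
            # Bare model name: attach the provider's prefix
            return f"{prefix_map.get(provider, 'openai')}/{model}"
        head = model.split("/")[0].lower()
        if head in KNOWN_PREFIXES:
            return model                # already routable by LiteLLM
        if base_url:
            return f"openai/{model}"    # OpenAI-compatible endpoint
        return f"{prefix_map.get(provider, 'openai')}/{model}"

    assert (to_litellm_model("Qwen/Qwen3-8B", "siliconflow",
                             "https://api.siliconflow.cn/v1", {})
            == "openai/Qwen/Qwen3-8B")
    assert (to_litellm_model("deepseek/deepseek-chat", "deepseek", None, {})
            == "deepseek/deepseek-chat")

The key design choice is ordering: a recognized prefix always wins, so models
that are already routable are never double-prefixed, and the openai/ rewrite
only applies when a custom base_url signals an OpenAI-compatible endpoint.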
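Note on the docker-compose change: setting the variables to empty strings
overrides any proxy values inherited from the host or the Docker daemon
configuration, and NO_PROXY=* tells clients to bypass a proxy for every host.
Standard HTTP clients such as urllib, requests, and httpx honor these
variables by default. A quick way to confirm the overrides took effect inside
the running container (the service name is a placeholder):

    # Run inside the backend container, e.g.:
    #   docker compose exec <service> python check_proxy.py
    import urllib.request

    # getproxies() merges the *_proxy environment variables and skips
    # empty values, so with the overrides above it should print {}.
    print(urllib.request.getproxies())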