diff --git a/app/agent/__init__.py b/app/agent/__init__.py
index e46b3ee4..cfb1b204 100644
--- a/app/agent/__init__.py
+++ b/app/agent/__init__.py
@@ -1,4 +1,5 @@
 import asyncio
+import traceback
 from time import strftime
 from typing import Dict, List
 
@@ -145,7 +146,9 @@ class MoviePilotAgent:
         try:
             # Agent运行配置
             agent_config = {
-                ""
+                "configurable": {
+                    "thread_id": self.session_id,
+                }
             }
 
             # 创建智能体
@@ -177,7 +180,7 @@ class MoviePilotAgent:
             logger.info(f"Agent执行被取消: session_id={self.session_id}")
             return "任务已取消", {}
         except Exception as e:
-            logger.error(f"Agent执行失败: {e}")
+            logger.error(f"Agent执行失败: {e} - {traceback.format_exc()}")
             return str(e), {}
 
     async def send_agent_message(self, message: str, title: str = "MoviePilot助手"):
diff --git a/app/helper/llm.py b/app/helper/llm.py
index 0ddd9921..46e2ca09 100644
--- a/app/helper/llm.py
+++ b/app/helper/llm.py
@@ -27,7 +27,7 @@ class LLMHelper:
             # 通过代理使用 Google 的 OpenAI 兼容接口
             from langchain_openai import ChatOpenAI
 
-            return ChatOpenAI(
+            model = ChatOpenAI(
                 model=settings.LLM_MODEL,
                 api_key=api_key,
                 max_retries=3,
@@ -41,7 +41,7 @@ class LLMHelper:
             # 使用 langchain-google-genai 原生接口(v4 API 变更:google_api_key → api_key,max_retries → retries)
             from langchain_google_genai import ChatGoogleGenerativeAI
 
-            return ChatGoogleGenerativeAI(
+            model = ChatGoogleGenerativeAI(
                 model=settings.LLM_MODEL,
                 api_key=api_key,
                 retries=3,
@@ -51,7 +51,7 @@ class LLMHelper:
         elif provider == "deepseek":
             from langchain_deepseek import ChatDeepSeek
 
-            return ChatDeepSeek(
+            model = ChatDeepSeek(
                 model=settings.LLM_MODEL,
                 api_key=api_key,
                 max_retries=3,
@@ -62,7 +62,7 @@ class LLMHelper:
         else:
             from langchain_openai import ChatOpenAI
 
-            return ChatOpenAI(
+            model = ChatOpenAI(
                 model=settings.LLM_MODEL,
                 api_key=api_key,
                 max_retries=3,
@@ -73,8 +73,18 @@ class LLMHelper:
                 openai_proxy=settings.PROXY_HOST,
             )
 
+        # 检查是否有profile
+        if hasattr(model, "profile") and model.profile:
+            logger.info(f"使用LLM模型: {model.model},Profile: {model.profile}")
+        else:
+            model.profile = {
+                "max_input_tokens": settings.LLM_MAX_CONTEXT_TOKENS * 1000,  # 转换为token单位
+            }
+
+        return model
+
     def get_models(
-        self, provider: str, api_key: str, base_url: str = None
+            self, provider: str, api_key: str, base_url: str = None
     ) -> List[str]:
         """获取模型列表"""
         logger.info(f"获取 {provider} 模型列表...")
@@ -102,7 +112,7 @@ class LLMHelper:
 
     @staticmethod
     def _get_openai_compatible_models(
-        provider: str, api_key: str, base_url: str = None
+            provider: str, api_key: str, base_url: str = None
     ) -> List[str]:
         """获取OpenAI兼容模型列表"""
         try:
diff --git a/app/modules/qqbot/qqbot.py b/app/modules/qqbot/qqbot.py
index 0e2c838c..a9ea7826 100644
--- a/app/modules/qqbot/qqbot.py
+++ b/app/modules/qqbot/qqbot.py
@@ -11,7 +11,6 @@ from typing import Optional, List, Tuple
 
 from PIL import Image
 
-from app.chain.message import MessageChain
 from app.core.cache import FileCache
 from app.core.context import MediaInfo, Context
 from app.core.metainfo import MetaInfo
@@ -101,11 +100,12 @@ class QQBot:
         """直接调用消息链处理,避免 HTTP 开销"""
 
         def _run():
             try:
-                MessageChain().process(
+                # FIXME
+                """MessageChain().process(
                     body=payload,
                     form={},
                     args={"source": self._config_name},
-                )
+                )"""
             except Exception as e:
                 logger.error(f"QQ Bot 转发消息失败: {e}")
diff --git a/app/modules/telegram/telegram.py b/app/modules/telegram/telegram.py
index 513e173b..45c1b2b2 100644
--- a/app/modules/telegram/telegram.py
+++ b/app/modules/telegram/telegram.py
@@ -83,8 +83,8 @@ class Telegram:
             # 发送正在输入状态
             try:
                 _bot.send_chat_action(message.chat.id, 'typing')
-            except Exception as e:
-                logger.error(f"发送Telegram正在输入状态失败:{e}")
+            except Exception as err:
+                logger.error(f"发送Telegram正在输入状态失败:{err}")
             RequestUtils(timeout=15).post_res(self._ds_url, json=message.json)
 
         @_bot.callback_query_handler(func=lambda call: True)
diff --git a/app/modules/wechat/wechatbot.py b/app/modules/wechat/wechatbot.py
index 65250605..e3a30ea6 100644
--- a/app/modules/wechat/wechatbot.py
+++ b/app/modules/wechat/wechatbot.py
@@ -9,12 +9,11 @@ from typing import Optional, List, Dict, Tuple, Set
 
 import websocket
 
-from app.chain.message import MessageChain
 from app.core.cache import FileCache
+from app.core.config import settings
 from app.core.context import MediaInfo, Context
 from app.core.metainfo import MetaInfo
 from app.log import logger
-from app.schemas.types import MessageChannel
 from app.utils.string import StringUtils
 
 
@@ -27,6 +26,7 @@ class WeChatBot:
     """
 
     _default_ws_url = "wss://openws.work.weixin.qq.com"
+    _ds_url = f"http://127.0.0.1:{settings.PORT}/api/v1/message?token={settings.API_TOKEN}"
     _heartbeat_interval = 30
     _ack_timeout = 10
 
@@ -361,13 +361,14 @@ class WeChatBot:
     def _forward_to_message_chain(self, userid: str, text: str) -> None:
 
         def _run():
             try:
-                MessageChain().handle_message(
+                # FIXME
+                """MessageChain().handle_message(
                     channel=MessageChannel.Wechat,
                     source=self._config_name,
                     userid=userid,
                     username=userid,
                     text=text,
-                )
+                )"""
             except Exception as err:
                 logger.error(f"企业微信智能机器人转发消息失败:{err}")