mirror of
https://github.com/jxxghp/MoviePilot.git
synced 2026-05-06 21:34:41 +08:00
- Add compatibility patch for langchain-openai responses API to ensure system messages are extracted as top-level instructions, addressing Codex endpoint requirements. - Update provider list: add Alibaba, Volcengine, and Tencent TokenHub; adjust SiliconFlow and MiniMax endpoints; refine provider ordering and model list strategies. - Extend models.dev-only listing logic for providers lacking stable models.list endpoints. - Increase models.dev cache TTL for improved efficiency. - Add tests for openai responses API and streaming compatibility patches.
15 lines · 313 B · Python
import asyncio
|
|
from app.agent.llm.helper import LLMHelper
|
|
from app.core.config import settings
|
|
import json
|
|
|
|
async def run():
    """Obtain a non-streaming LLM client for the chatgpt provider and report its streaming flag.

    Demo entry coroutine: asks LLMHelper for a "gpt-5.1-codex" instance with
    streaming disabled, then prints the resulting client's ``streaming``
    attribute so the compatibility patch behavior can be eyeballed.
    """
    client = await LLMHelper.get_llm(
        provider="chatgpt",
        model="gpt-5.1-codex",
        streaming=False,
    )
    print("streaming:", client.streaming)
# Guard the script entry point so importing this module does not trigger the
# network-touching demo; behavior when executed directly is unchanged.
if __name__ == "__main__":
    asyncio.run(run())