feat(provider): add kuaishou-wanqing endpoint with base URL presets and manual model input

This commit is contained in:
jxxghp
2026-05-08 15:16:29 +08:00
parent 14b366a648
commit e4c5a4f232
4 changed files with 103 additions and 8 deletions

View File

@@ -615,6 +615,29 @@ class LLMProviderManager(metaclass=Singleton):
description="京东云 JoyBuilder OpenAI-compatible 端点,支持通用 API 与 Coding Plan 地址预设。",
sort_order=150,
),
ProviderSpec(
    id="kuaishou-wanqing",
    # Kuaishou Wanqing (StreamLake) OpenAI-compatible endpoint.
    name="快手万擎",
    runtime="openai_compatible",
    # Default to the pay-as-you-go gateway; the "Coding Plan" preset below
    # targets the dedicated coding gateway instead.
    default_base_url="https://wanqing.streamlakeapi.com/api/gateway/v1/endpoints",
    base_url_presets=(
        url_preset(
            id="kuaishou-wanqing-usage",
            label="按量计费",
            value="https://wanqing.streamlakeapi.com/api/gateway/v1/endpoints",
        ),
        url_preset(
            id="kuaishou-wanqing-coding",
            label="Coding Plan",
            value="https://wanqing.streamlakeapi.com/api/gateway/coding/v1",
        ),
    ),
    # FIX: the hint was missing the separator between "API Key" and the
    # follow-up sentence, rendering it as a run-on string.
    api_key_hint="填写快手万擎 API Key,模型名称请填写万擎控制台或 OpenClaw 配置中的 model ID。",
    # Wanqing exposes per-endpoint model IDs rather than a global model
    # catalogue, so refresh is disabled and model IDs are entered manually.
    supports_model_refresh=False,
    model_list_strategy="manual",
    description="快手万擎 OpenAI-compatible 端点,支持按量计费与 Coding Plan 地址预设。",
    sort_order=155,
),
ProviderSpec(
id="volcengine",
name="火山方舟",
@@ -1710,6 +1733,11 @@ class LLMProviderManager(metaclass=Singleton):
# 在使用目录型 provider 时也能拿到最新参数。
if force_refresh:
await self.get_models_dev_data(force_refresh=True)
if resolved_model_list_strategy == "manual":
# 万擎等推理点型平台没有稳定的全局模型目录,模型 ID 需要用户从控制台复制。
return []
runtime = await self.resolve_runtime(
provider_id,
model=None,

View File

@@ -87,6 +87,11 @@ LLM_PROVIDER_DEFAULTS = {
"model": "",
"base_url": "https://modelservice.jdcloud.com/v1",
},
"kuaishou-wanqing": {
"model": "",
"base_url": "https://wanqing.streamlakeapi.com/api/gateway/v1/endpoints",
"base_url_preset": "kuaishou-wanqing-usage",
},
}
LLM_PROVIDER_FALLBACK_CHOICES = {
"deepseek": "DeepSeek",
@@ -97,6 +102,7 @@ LLM_PROVIDER_FALLBACK_CHOICES = {
"openrouter": "OpenRouter",
"groq": "Groq",
"jdcloud": "京东云",
"kuaishou-wanqing": "快手万擎",
}
RUNTIME_PACKAGE = {
"name": "moviepilot-frontend-runtime",
@@ -1200,8 +1206,6 @@ def _llm_provider_defaults(
provider_definitions: list[dict[str, Any]],
) -> dict[str, str]:
normalized_provider = str(provider or "").strip().lower()
if normalized_provider == "kimi-coding":
normalized_provider = "moonshot"
defaults = dict(LLM_PROVIDER_DEFAULTS.get(normalized_provider) or {})
provider_meta = next(
(
@@ -1233,8 +1237,6 @@ def _llm_provider_meta(
provider_definitions: list[dict[str, Any]],
) -> dict[str, Any]:
normalized_provider = str(provider or "").strip().lower()
if normalized_provider == "kimi-coding":
normalized_provider = "moonshot"
provider_meta = next(
(
item
@@ -1788,8 +1790,6 @@ def _collect_agent_config(
provider_definitions = _load_llm_provider_definitions(runtime_python=runtime_python)
provider_choices = _llm_provider_choice_map(provider_definitions)
current_provider = _env_default("LLM_PROVIDER", "deepseek").lower()
if current_provider == "kimi-coding":
current_provider = "moonshot"
if current_provider not in provider_choices:
current_provider = "deepseek"

View File

@@ -287,6 +287,61 @@ class LlmProviderRegistryTest(unittest.TestCase):
self.assertIsNone(provider.models_dev_provider_id)
self.assertFalse(provider.supports_model_refresh)
def test_builtin_provider_includes_kuaishou_wanqing_endpoint(self):
    """The built-in registry exposes the Kuaishou Wanqing provider spec."""
    provider = LLMProviderManager().get_provider("kuaishou-wanqing")
    self.assertEqual(provider.name, "快手万擎")
    self.assertEqual(provider.runtime, "openai_compatible")
    self.assertEqual(
        provider.default_base_url,
        "https://wanqing.streamlakeapi.com/api/gateway/v1/endpoints",
    )
    # Compare presets as plain (id, label, value) triples for a readable diff.
    expected_presets = (
        (
            "kuaishou-wanqing-usage",
            "按量计费",
            "https://wanqing.streamlakeapi.com/api/gateway/v1/endpoints",
        ),
        (
            "kuaishou-wanqing-coding",
            "Coding Plan",
            "https://wanqing.streamlakeapi.com/api/gateway/coding/v1",
        ),
    )
    actual_presets = tuple(
        (preset.id, preset.label, preset.value)
        for preset in provider.base_url_presets
    )
    self.assertEqual(actual_presets, expected_presets)
    self.assertEqual(provider.model_list_strategy, "manual")
    self.assertFalse(provider.supports_model_refresh)
def test_kuaishou_wanqing_coding_preset_resolves_runtime_base_url(self):
    """The Coding Plan preset resolves to the dedicated coding gateway URL."""
    coding_url = "https://wanqing.streamlakeapi.com/api/gateway/coding/v1"
    manager = LLMProviderManager()
    runtime = asyncio.run(
        manager.resolve_runtime(
            provider_id="kuaishou-wanqing",
            model="kat-coder-pro-v2",
            api_key="sk-test",
            base_url=coding_url,
            base_url_preset_id="kuaishou-wanqing-coding",
        )
    )
    self.assertEqual(runtime["runtime"], "openai_compatible")
    self.assertEqual(runtime["base_url"], coding_url)
def test_kuaishou_wanqing_models_are_manual_input(self):
    """A manual-strategy provider yields an empty model list (user types the ID)."""
    listed = asyncio.run(
        LLMProviderManager().list_models(provider_id="kuaishou-wanqing")
    )
    self.assertEqual(listed, [])
def test_builtin_minimax_provider_merges_general_and_coding_presets(self):
manager = LLMProviderManager()

View File

@@ -185,7 +185,7 @@ class LocalSetupLlmProviderPromptTests(unittest.TestCase):
self.assertEqual(provider, "my-provider_01")
def test_fallback_provider_choices_include_baidu_and_jdcloud(self):
def test_fallback_provider_choices_include_baidu_jdcloud_and_wanqing(self):
module = load_local_setup_module()
self.assertEqual(
@@ -193,8 +193,12 @@ class LocalSetupLlmProviderPromptTests(unittest.TestCase):
"百度千帆",
)
self.assertEqual(module.LLM_PROVIDER_FALLBACK_CHOICES["jdcloud"], "京东云")
self.assertEqual(
module.LLM_PROVIDER_FALLBACK_CHOICES["kuaishou-wanqing"],
"快手万擎",
)
def test_local_setup_defaults_include_baidu_and_jdcloud_base_urls(self):
def test_local_setup_defaults_include_baidu_jdcloud_and_wanqing_base_urls(self):
module = load_local_setup_module()
self.assertEqual(
@@ -205,6 +209,14 @@ class LocalSetupLlmProviderPromptTests(unittest.TestCase):
module.LLM_PROVIDER_DEFAULTS["jdcloud"]["base_url"],
"https://modelservice.jdcloud.com/v1",
)
self.assertEqual(
module.LLM_PROVIDER_DEFAULTS["kuaishou-wanqing"]["base_url"],
"https://wanqing.streamlakeapi.com/api/gateway/v1/endpoints",
)
self.assertEqual(
module.LLM_PROVIDER_DEFAULTS["kuaishou-wanqing"]["base_url_preset"],
"kuaishou-wanqing-usage",
)
def test_collect_agent_config_prompts_for_duplicate_base_url_presets(self):
module = load_local_setup_module()