Merge pull request #5339 from PKC278/v2

This commit is contained in:
jxxghp
2026-01-10 07:42:38 +08:00
committed by GitHub
6 changed files with 497 additions and 1 deletions

View File

@@ -44,6 +44,7 @@ class SiteChain(ChainBase):
"star-space.net": self.__indexphp_test,
"yemapt.org": self.__yema_test,
"hddolby.com": self.__hddolby_test,
"rousi.pro": self.__rousi_test,
}
def refresh_userdata(self, site: dict = None) -> Optional[SiteUserData]:
@@ -249,6 +250,32 @@ class SiteChain(ChainBase):
else:
return False, f"错误:{res.status_code} {res.reason}"
@staticmethod
def __rousi_test(site: Site) -> Tuple[bool, str]:
    """
    Check whether the rousi.pro site is reachable and the API key is valid.

    Calls the API v1 /profile endpoint, authenticating with the site's
    Passkey sent as a Bearer token.

    :param site: site configuration (url, apikey, proxy flag, timeout)
    :return: (success flag, human-readable message)
    """
    url = f"https://{StringUtils.get_url_domain(site.url)}/api/v1/profile"
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "Authorization": f"Bearer {site.apikey}",
    }
    res = RequestUtils(
        headers=headers,
        proxies=settings.PROXY if site.proxy else None,
        timeout=site.timeout or 15
    ).get_res(url=url)
    if res is None:
        return False, "无法打开网站!"
    if res.status_code == 200:
        # A 200 status may still carry a non-JSON body (e.g. an HTML error
        # page served by a reverse proxy) — guard the parse so a malformed
        # response yields an error tuple instead of an unhandled exception.
        try:
            user_info = res.json()
        except Exception:
            return False, "响应解析失败"
        if user_info and user_info.get("code") == 0:
            return True, "连接成功"
        return False, "APIKEY已过期"
    else:
        return False, f"错误:{res.status_code} {res.reason}"
@staticmethod
def __parse_favicon(url: str, cookie: str, ua: str) -> Tuple[str, Optional[str]]:
"""

View File

@@ -278,7 +278,7 @@ class ConfigModel(BaseModel):
# 搜索多个名称
SEARCH_MULTIPLE_NAME: bool = False
# 最大搜索名称数量
MAX_SEARCH_NAME_LIMIT: int = 2
MAX_SEARCH_NAME_LIMIT: int = 3
# ==================== 下载配置 ====================
# 种子标签

View File

@@ -12,6 +12,7 @@ from app.modules.indexer.spider import SiteSpider
from app.modules.indexer.spider.haidan import HaiDanSpider
from app.modules.indexer.spider.hddolby import HddolbySpider
from app.modules.indexer.spider.mtorrent import MTorrentSpider
from app.modules.indexer.spider.rousi import RousiSpider
from app.modules.indexer.spider.tnode import TNodeSpider
from app.modules.indexer.spider.torrentleech import TorrentLeech
from app.modules.indexer.spider.yema import YemaSpider
@@ -212,6 +213,13 @@ class IndexerModule(_ModuleBase):
mtype=mtype,
page=page
)
elif site.get('parser') == "RousiPro":
error_flag, result = RousiSpider(site).search(
keyword=search_word,
mtype=mtype,
cat=cat,
page=page
)
else:
error_flag, result = self.__spider_search(
search_word=search_word,
@@ -300,6 +308,13 @@ class IndexerModule(_ModuleBase):
mtype=mtype,
page=page
)
elif site.get('parser') == "RousiPro":
error_flag, result = await RousiSpider(site).async_search(
keyword=search_word,
mtype=mtype,
cat=cat,
page=page
)
else:
error_flag, result = await self.__async_spider_search(
search_word=search_word,

View File

@@ -35,6 +35,7 @@ class SiteSchema(Enum):
HDDolby = "HDDolby"
Zhixing = "Zhixing"
Bitpt = "Bitpt"
RousiPro = "RousiPro"
class SiteParserBase(metaclass=ABCMeta):

View File

@@ -0,0 +1,164 @@
# -*- coding: utf-8 -*-
import json
from typing import Optional, Tuple
from app.log import logger
from app.modules.indexer.parser import SiteParserBase, SiteSchema
from app.utils.string import StringUtils
class RousiSiteUserInfo(SiteParserBase):
    """
    Site parser for rousi.pro.

    Talks to the site's API v1 and authenticates with the user's Passkey
    sent as a Bearer token.  A single /profile call returns everything the
    parser needs (traffic, bonus, seeding/leeching stats), so all other
    per-page hooks are no-ops.
    """

    # Parser schema identifier used by the parser dispatcher.
    schema = SiteSchema.RousiPro
    # Authenticate with the API key instead of cookies.
    request_mode = "apikey"

    def _parse_site_page(self, html_text: str):
        """
        Configure the API request address and headers.

        Uses the API v1 /profile endpoint (with seeding/leeching data
        included) to fetch the complete user information in one call.
        """
        self._base_url = f"https://{StringUtils.get_url_domain(self._site_url)}"
        self._user_basic_page = "api/v1/profile?include_fields[user]=seeding_leeching_data"
        self._user_basic_params = {}
        self._user_basic_headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
            "Authorization": f"Bearer {self.apikey}"
        }
        # API v1 returns everything in the single profile call; clear the
        # remaining page attributes so the base class skips those fetches.
        self._user_traffic_page = None
        self._user_detail_page = None
        self._torrent_seeding_page = None
        self._user_mail_unread_page = None
        self._sys_mail_unread_page = None

    def _parse_logged_in(self, html_text):
        """
        Login check.

        With API-key authentication the HTTP status code already decides
        success, so this hook always reports logged-in.
        """
        return True

    def _parse_user_base_info(self, html_text: str):
        """
        Parse the user's base information.

        The API v1 profile response carries the complete user record,
        including upload/download totals and seeding data.

        Expected response shape:
        {
            "code": 0,
            "message": "success",
            "data": {
                "id": 1,
                "username": "example",
                "level_text": "Lv.5",
                "registered_at": "2024-01-01T00:00:00Z",
                "uploaded": 1073741824,
                "downloaded": 536870912,
                "ratio": 2.0,
                "karma": 1000.5,
                "seeding_leeching_data": {
                    "seeding_count": 10,
                    "seeding_size": 10737418240,
                    "leeching_count": 2,
                    "leeching_size": 2147483648
                }
            }
        }
        """
        if not html_text:
            return
        try:
            data = json.loads(html_text)
        except json.JSONDecodeError:
            logger.error(f"{self._site_name} JSON 解析失败")
            return
        # json.loads may legally yield None (body "null") or a non-dict
        # value — guard before calling .get(), which would otherwise raise
        # AttributeError on the error path itself.
        if not isinstance(data, dict) or data.get("code") != 0:
            self.err_msg = data.get("message", "未知错误") if isinstance(data, dict) else "未知错误"
            logger.warn(f"{self._site_name} API 错误: {self.err_msg}")
            return
        user_info = data.get("data")
        if not user_info:
            return
        # Basic identity fields
        self.userid = user_info.get("id")
        self.username = user_info.get("username")
        self.user_level = user_info.get("level_text") or user_info.get("role_text")
        # Registration time, normalized to YYYY-MM-DD HH:MM:SS
        join_at = StringUtils.unify_datetime_str(user_info.get("registered_at"))
        if join_at:
            # Truncate to the canonical 19-character timestamp when longer.
            if len(join_at) >= 19:
                self.join_at = join_at[:19]
            else:
                self.join_at = join_at
        # Traffic totals
        self.upload = int(user_info.get("uploaded") or 0)
        self.download = int(user_info.get("downloaded") or 0)
        self.ratio = round(float(user_info.get("ratio") or 0), 2)
        # Bonus points (the site calls this "karma")
        self.bonus = float(user_info.get("karma") or 0)
        # Seeding / leeching statistics.  The key may be present with a
        # null value, in which case .get(key, {}) would return None — use
        # an `or {}` fallback so the .get() calls below stay safe.
        sl_data = user_info.get("seeding_leeching_data") or {}
        self.seeding = int(sl_data.get("seeding_count") or 0)
        self.seeding_size = int(sl_data.get("seeding_size") or 0)
        self.leeching = int(sl_data.get("leeching_count") or 0)
        self.leeching_size = int(sl_data.get("leeching_size") or 0)

    def _parse_user_traffic_info(self, html_text: str):
        """
        Parse user traffic information.

        Already covered by _parse_user_base_info for this site; no-op.
        """
        pass

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse user detail information.

        Already covered by _parse_user_base_info for this site; no-op.
        """
        pass

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: Optional[bool] = False) -> Optional[str]:
        """
        Parse user seeding information.

        Seeding data is taken from seeding_leeching_data in
        _parse_user_base_info, so there is no paged seeding listing.

        :param html_text: page content
        :param multi_page: whether the data spans multiple pages
        :return: next-page address (None when there is no next page)
        """
        return None

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        """
        Parse unread-message links.

        API v1 does not currently expose a messages endpoint.

        :param html_text: page content
        :param msg_links: list collecting message links
        :return: next-page address (None when there is no next page)
        """
        return None

    def _parse_message_content(self, html_text) -> Tuple[Optional[str], Optional[str], Optional[str]]:
        """
        Parse a message's content.

        API v1 does not currently expose a messages endpoint.

        :param html_text: page content
        :return: (title, date, content)
        """
        return None, None, None

View File

@@ -0,0 +1,289 @@
import base64
import json
from typing import List, Optional, Tuple
from app.core.config import settings
from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
from app.schemas import MediaType
from app.utils.http import RequestUtils, AsyncRequestUtils
from app.utils.string import StringUtils
class RousiSpider:
    """
    Search spider for rousi.pro, built on the site's API v1.

    - Authentication: Bearer token (the user's Passkey)
    - Search endpoint: /api/v1/torrents
    - Detail endpoint: /api/v1/torrents/:id
    """

    # Indexer id from the site configuration.
    _indexerid = None
    # Bare domain derived from the configured site URL.
    _domain = None
    # Full site URL as configured.
    _url = None
    # Display name of the site.
    _name = ""
    # Proxy settings (set only when the site is flagged to use a proxy).
    _proxy = None
    _cookie = None
    _ua = None
    # Page size requested from the API.
    _size = 100
    _searchurl = "https://%s/api/v1/torrents"
    _downloadurl = "https://%s/api/v1/torrents/%s"
    _timeout = 15
    # Category names understood by the API.
    # The API does not support multi-category search; only one category
    # is sent per request.
    _movie_category = 'movie'
    _tv_category = 'tv'
    # API key (Passkey) used as the Bearer token.
    _apikey = None

    def __init__(self, indexer: dict):
        """
        Initialize the spider from the indexer/site configuration dict.

        :param indexer: site configuration (id, domain, name, proxy,
                        cookie, ua, apikey, timeout)
        """
        self.systemconfig = SystemConfigOper()
        if indexer:
            self._indexerid = indexer.get('id')
            self._url = indexer.get('domain')
            self._domain = StringUtils.get_url_domain(self._url)
            self._searchurl = self._searchurl % self._domain
            # Keep a "%s" placeholder for the torrent id, filled in later
            # by __get_download_url.
            self._downloadurl = self._downloadurl % (self._domain, "%s")
            self._name = indexer.get('name')
            if indexer.get('proxy'):
                self._proxy = settings.PROXY
            self._cookie = indexer.get('cookie')
            self._ua = indexer.get('ua')
            self._apikey = indexer.get('apikey')
            self._timeout = indexer.get('timeout') or 15

    def __get_params(self, keyword: str, mtype: MediaType = None, cat: Optional[str] = None, page: Optional[int] = 0) -> dict:
        """
        Build the API request parameters.

        :param keyword: search keyword
        :param mtype: media type (MOVIE/TV)
        :param cat: user-selected category ids (comma-separated string)
        :param page: page number (0-based here; the API is 1-based)
        :return: request parameter dict
        """
        params = {
            # Callers may pass page=None — default it to 0 instead of
            # letting int(None) raise TypeError.
            "page": int(page or 0) + 1,
            "page_size": self._size
        }
        if keyword:
            params["keyword"] = keyword
        # The API supports only a single category parameter.  Prefer the
        # user's explicit selection; otherwise infer from the media type.
        if cat:
            # Map the selected category ids back to API category names.
            category_names = self.__get_category_names_by_ids(cat)
            if category_names:
                # When multiple categories were selected, use the first.
                params["category"] = category_names[0]
        elif mtype:
            if mtype == MediaType.MOVIE:
                params["category"] = self._movie_category
            elif mtype == MediaType.TV:
                params["category"] = self._tv_category
        return params

    def __get_category_names_by_ids(self, cat: str) -> Optional[list]:
        """
        Map user-selected category ids to the API's category names.

        :param cat: comma-separated category ids, e.g. "1,2,3"
        :return: list of API category names (e.g. ["movie", "tv"]),
                 or None when nothing maps
        """
        if not cat:
            return None
        # Id → API category-name mapping.
        id_to_name = {
            '1': 'movie',
            '2': 'tv',
            '3': 'documentary',
            '4': 'animation',
            '6': 'variety'
        }
        cat_ids = [c.strip() for c in cat.split(',') if c.strip()]
        category_names = [id_to_name.get(cat_id) for cat_id in cat_ids if cat_id in id_to_name]
        return category_names if category_names else None

    def __process_response(self, res) -> Tuple[bool, List[dict]]:
        """
        Process the API response.

        :param res: HTTP response object (or None on connection failure)
        :return: (error occurred, torrent list)
        """
        if res and res.status_code == 200:
            try:
                data = res.json()
                if data.get('code') == 0:
                    results = data.get('data', {}).get('torrents', [])
                    return False, self.__parse_result(results)
                else:
                    logger.warn(f"{self._name} 搜索失败,错误信息:{data.get('message')}")
                    return True, []
            except Exception as e:
                logger.warn(f"{self._name} 解析响应失败:{e}")
                return True, []
        elif res is not None:
            logger.warn(f"{self._name} 搜索失败HTTP 错误码:{res.status_code}")
            return True, []
        else:
            logger.warn(f"{self._name} 搜索失败,无法连接 {self._domain}")
            return True, []

    def __parse_result(self, results: List[dict]) -> List[dict]:
        """
        Convert the API's torrent records into MoviePilot's standard format.

        :param results: torrent list as returned by the API
        :return: list of normalized torrent dicts
        """
        torrents = []
        if not results:
            return torrents
        for result in results:
            # Category: may arrive as a dict ({slug/name}) or a plain string.
            raw_cat = result.get('category')
            cat_val = None
            category = MediaType.UNKNOWN.value
            if isinstance(raw_cat, dict):
                cat_val = raw_cat.get('slug') or raw_cat.get('name')
            elif isinstance(raw_cat, str):
                cat_val = raw_cat
            if cat_val:
                cat_val = str(cat_val).lower()
                if cat_val == self._movie_category:
                    category = MediaType.MOVIE.value
                elif cat_val == self._tv_category:
                    category = MediaType.TV.value
                else:
                    category = MediaType.UNKNOWN.value
            # Promotion: the backend already resolves site-wide promotion
            # priority, so the returned promotion data is authoritative.
            downloadvolumefactor = 1.0
            uploadvolumefactor = 1.0
            freedate = None
            promotion = result.get('promotion')
            if promotion and promotion.get('is_active'):
                # Multiplier keys may be present with a null value — fall
                # back to 1.0 rather than crashing on float(None).
                downloadvolumefactor = float(promotion.get('down_multiplier') or 1.0)
                uploadvolumefactor = float(promotion.get('up_multiplier') or 1.0)
                # Promotion expiry, normalized to YYYY-MM-DD HH:MM:SS.
                if promotion.get('until'):
                    freedate = StringUtils.unify_datetime_str(promotion.get('until'))
            torrent = {
                'title': result.get('title'),
                'description': result.get('subtitle'),
                'enclosure': self.__get_download_url(result.get('id')),
                'pubdate': StringUtils.unify_datetime_str(result.get('created_at')),
                'size': int(result.get('size') or 0),
                'seeders': int(result.get('seeders') or 0),
                'peers': int(result.get('leechers') or 0),
                'grabs': int(result.get('downloads') or 0),
                'downloadvolumefactor': downloadvolumefactor,
                'uploadvolumefactor': uploadvolumefactor,
                'freedate': freedate,
                'page_url': f"https://{self._domain}/torrent/{result.get('uuid')}",
                'labels': [],
                'category': category
            }
            torrents.append(torrent)
        return torrents

    def search(self, keyword: str, mtype: MediaType = None, cat: Optional[str] = None, page: Optional[int] = 0) -> Tuple[bool, List[dict]]:
        """
        Synchronous torrent search.

        :param keyword: search keyword
        :param mtype: media type (MOVIE/TV)
        :param cat: user-selected category ids (comma-separated)
        :param page: page number (0-based)
        :return: (error occurred, torrent list)
        """
        if not self._apikey:
            logger.warn(f"{self._name} 未配置 API Key (Passkey)")
            return True, []
        params = self.__get_params(keyword, mtype, cat, page)
        headers = {
            "Authorization": f"Bearer {self._apikey}",
            "Accept": "application/json"
        }
        res = RequestUtils(
            headers=headers,
            proxies=self._proxy,
            timeout=self._timeout
        ).get_res(url=self._searchurl, params=params)
        return self.__process_response(res)

    async def async_search(self, keyword: str, mtype: MediaType = None, cat: Optional[str] = None, page: Optional[int] = 0) -> Tuple[bool, List[dict]]:
        """
        Asynchronous torrent search.

        :param keyword: search keyword
        :param mtype: media type (MOVIE/TV)
        :param cat: user-selected category ids (comma-separated)
        :param page: page number (0-based)
        :return: (error occurred, torrent list)
        """
        if not self._apikey:
            logger.warn(f"{self._name} 未配置 API Key (Passkey)")
            return True, []
        params = self.__get_params(keyword, mtype, cat, page)
        headers = {
            "Authorization": f"Bearer {self._apikey}",
            "Accept": "application/json"
        }
        res = await AsyncRequestUtils(
            headers=headers,
            proxies=self._proxy,
            timeout=self._timeout
        ).get_res(url=self._searchurl, params=params)
        return self.__process_response(res)

    def __get_download_url(self, torrent_id: int) -> str:
        """
        Build the torrent download link.

        Encodes the request configuration as base64 so MoviePilot knows
        how to resolve the real download address: it first requests the
        detail endpoint with the given method/headers, then extracts the
        field named by 'result' (data.download_url) from the JSON response.

        :param torrent_id: torrent id
        :return: "[<base64 config>]<detail URL>" string
        """
        url = self._downloadurl % torrent_id
        params = {
            'method': 'get',
            'header': {
                'Authorization': f'Bearer {self._apikey}',
                'Accept': 'application/json'
            },
            'result': 'data.download_url'
        }
        base64_str = base64.b64encode(json.dumps(params).encode('utf-8')).decode('utf-8')
        return f"[{base64_str}]{url}"