feat: add RousiPro site support

PKC278
2026-01-09 22:08:24 +08:00
parent 57d4786a7f
commit cb3cef70e5
5 changed files with 459 additions and 0 deletions


@@ -12,6 +12,7 @@ from app.modules.indexer.spider import SiteSpider
from app.modules.indexer.spider.haidan import HaiDanSpider
from app.modules.indexer.spider.hddolby import HddolbySpider
from app.modules.indexer.spider.mtorrent import MTorrentSpider
from app.modules.indexer.spider.rousi import RousiSpider
from app.modules.indexer.spider.tnode import TNodeSpider
from app.modules.indexer.spider.torrentleech import TorrentLeech
from app.modules.indexer.spider.yema import YemaSpider
@@ -212,6 +213,12 @@ class IndexerModule(_ModuleBase):
                mtype=mtype,
                page=page
            )
        elif site.get('parser') == "RousiPro":
            error_flag, result = RousiSpider(site).search(
                keyword=search_word,
                mtype=mtype,
                page=page
            )
        else:
            error_flag, result = self.__spider_search(
                search_word=search_word,
@@ -300,6 +307,12 @@ class IndexerModule(_ModuleBase):
                mtype=mtype,
                page=page
            )
        elif site.get('parser') == "RousiPro":
            error_flag, result = await RousiSpider(site).async_search(
                keyword=search_word,
                mtype=mtype,
                page=page
            )
        else:
            error_flag, result = await self.__async_spider_search(
                search_word=search_word,
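For context, a minimal sketch of the site dict this dispatch expects, with key names taken from RousiSpider.__init__ further down in this commit; the concrete values here are hypothetical:

    site = {
        'id': 1,                         # indexer id (hypothetical)
        'name': 'Rousi',                 # site name (hypothetical)
        'domain': 'https://rousi.pro/',  # site URL
        'parser': 'RousiPro',            # routes searches through RousiSpider
        'apikey': '<passkey>',           # sent as the Bearer token
        'proxy': False,                  # use the configured proxy if truthy
        'ua': None,                      # optional user agent
        'cookie': None,                  # not needed for API auth
        'timeout': 15                    # request timeout in seconds
    }
    error_flag, result = RousiSpider(site).search(keyword="Dune", mtype=None, page=0)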


@@ -35,6 +35,7 @@ class SiteSchema(Enum):
    HDDolby = "HDDolby"
    Zhixing = "Zhixing"
    Bitpt = "Bitpt"
    RousiPro = "RousiPro"


class SiteParserBase(metaclass=ABCMeta):


@@ -0,0 +1,164 @@
# -*- coding: utf-8 -*-
import json
from typing import Optional, Tuple

from app.log import logger
from app.modules.indexer.parser import SiteParserBase, SiteSchema
from app.utils.string import StringUtils


class RousiSiteUserInfo(SiteParserBase):
    """
    Rousi.pro site parser
    Uses the API v1 endpoints, authenticated with the Passkey as a Bearer token
    """
    schema = SiteSchema.RousiPro
    request_mode = "apikey"

    def _parse_site_page(self, html_text: str):
        """
        Configure the API request URL and headers
        User information is fetched from the API v1 /profile endpoint
        """
        self._base_url = f"https://{StringUtils.get_url_domain(self._site_url)}"
        self._user_basic_page = "api/v1/profile?include_fields[user]=seeding_leeching_data"
        self._user_basic_params = {}
        self._user_basic_headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
            "Authorization": f"Bearer {self.apikey}"
        }
        # Rousi.pro API v1 returns everything from a single endpoint, so no extra pages are needed
        self._user_traffic_page = None
        self._user_detail_page = None
        self._torrent_seeding_page = None
        self._user_mail_unread_page = None
        self._sys_mail_unread_page = None

    def _parse_logged_in(self, html_text):
        """
        Check whether login succeeded
        In API auth mode this is determined by the HTTP status code, so always return True here
        """
        return True

    def _parse_user_base_info(self, html_text: str):
        """
        Parse basic user information
        The API v1 endpoint returns the full user profile, including upload/download traffic and seeding data
        Example API response:
        {
            "code": 0,
            "message": "success",
            "data": {
                "id": 1,
                "username": "example",
                "level_text": "Lv.5",
                "registered_at": "2024-01-01T00:00:00Z",
                "uploaded": 1073741824,
                "downloaded": 536870912,
                "ratio": 2.0,
                "karma": 1000.5,
                "seeding_leeching_data": {
                    "seeding_count": 10,
                    "seeding_size": 10737418240,
                    "leeching_count": 2,
                    "leeching_size": 2147483648
                }
            }
        }
        """
        if not html_text:
            return
        try:
            data = json.loads(html_text)
        except json.JSONDecodeError:
            logger.error(f"{self._site_name} failed to parse JSON")
            return
        if not data or data.get("code") != 0:
            self.err_msg = (data or {}).get("message", "unknown error")
            logger.warn(f"{self._site_name} API error: {self.err_msg}")
            return
        user_info = data.get("data")
        if not user_info:
            return
        # Basic information
        self.userid = user_info.get("id")
        self.username = user_info.get("username")
        self.user_level = user_info.get("level_text") or user_info.get("role_text")
        # Registration time, normalized to YYYY-MM-DD HH:MM:SS
        join_at = StringUtils.unify_datetime_str(user_info.get("registered_at"))
        if join_at:
            # Keep only the first 19 characters (YYYY-MM-DD HH:MM:SS)
            self.join_at = join_at[:19]
        # Traffic information
        self.upload = int(user_info.get("uploaded") or 0)
        self.download = int(user_info.get("downloaded") or 0)
        self.ratio = round(float(user_info.get("ratio") or 0), 2)
        # Bonus points (called "karma" on this site)
        self.bonus = float(user_info.get("karma") or 0)
        # Seeding / leeching data
        sl_data = user_info.get("seeding_leeching_data") or {}
        self.seeding = int(sl_data.get("seeding_count") or 0)
        self.seeding_size = int(sl_data.get("seeding_size") or 0)
        self.leeching = int(sl_data.get("leeching_count") or 0)
        self.leeching_size = int(sl_data.get("leeching_size") or 0)

    def _parse_user_traffic_info(self, html_text: str):
        """
        Parse user traffic information
        Rousi.pro API v1 is fully handled in _parse_user_base_info, so nothing to do here
        """
        pass

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse user detail information
        Rousi.pro API v1 is fully handled in _parse_user_base_info, so nothing to do here
        """
        pass

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: Optional[bool] = False) -> Optional[str]:
        """
        Parse user seeding information
        Rousi.pro API v1 already provides this via seeding_leeching_data in _parse_user_base_info
        :param html_text: page content
        :param multi_page: whether the data spans multiple pages
        :return: next page URL (None if there is no next page)
        """
        return None

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        """
        Parse unread message links
        Rousi.pro API v1 does not provide message endpoints yet
        :param html_text: page content
        :param msg_links: list of message links
        :return: next page URL (None if there is no next page)
        """
        return None

    def _parse_message_content(self, html_text) -> Tuple[Optional[str], Optional[str], Optional[str]]:
        """
        Parse message content
        Rousi.pro API v1 does not provide message endpoints yet
        :param html_text: page content
        :return: (title, date, content)
        """
        return None, None, None
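As a quick check of the mapping above, the documented example response can be walked through outside the MoviePilot framework; this standalone sketch repeats the same extraction steps on the sample values from the docstring:

    import json

    sample = '''{
        "code": 0, "message": "success",
        "data": {
            "id": 1, "username": "example", "level_text": "Lv.5",
            "registered_at": "2024-01-01T00:00:00Z",
            "uploaded": 1073741824, "downloaded": 536870912,
            "ratio": 2.0, "karma": 1000.5,
            "seeding_leeching_data": {"seeding_count": 10, "seeding_size": 10737418240,
                                      "leeching_count": 2, "leeching_size": 2147483648}
        }
    }'''
    user = json.loads(sample)["data"]
    print(user["username"], user["level_text"])                              # example Lv.5
    print(int(user["uploaded"] or 0), round(float(user["ratio"] or 0), 2))   # 1073741824 2.0
    sl = user.get("seeding_leeching_data") or {}
    print(int(sl.get("seeding_count") or 0), int(sl.get("seeding_size") or 0))  # 10 10737418240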


@@ -0,0 +1,254 @@
import base64
import json
from typing import List, Optional, Tuple

from app.core.config import settings
from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
from app.schemas import MediaType
from app.utils.http import RequestUtils, AsyncRequestUtils
from app.utils.singleton import SingletonClass
from app.utils.string import StringUtils


class RousiSpider(metaclass=SingletonClass):
    """
    Rousi.pro API v1 spider
    Uses the API v1 endpoints for torrent search
    - Authentication: Bearer token (Passkey)
    - Search endpoint: /api/v1/torrents
    - Detail endpoint: /api/v1/torrents/:id
    """
    _indexerid = None
    _domain = None
    _url = None
    _name = ""
    _proxy = None
    _cookie = None
    _ua = None
    _size = 20
    _searchurl = "https://%s/api/v1/torrents"
    _downloadurl = "https://%s/api/v1/torrents/%s"
    _timeout = 15
    _apikey = None

    def __init__(self, indexer: dict):
        self.systemconfig = SystemConfigOper()
        if indexer:
            self._indexerid = indexer.get('id')
            self._url = indexer.get('domain')
            self._domain = StringUtils.get_url_domain(self._url)
            self._searchurl = self._searchurl % self._domain
            self._downloadurl = self._downloadurl % (self._domain, "%s")
            self._name = indexer.get('name')
            if indexer.get('proxy'):
                self._proxy = settings.PROXY
            self._cookie = indexer.get('cookie')
            self._ua = indexer.get('ua')
            self._apikey = indexer.get('apikey')
            self._timeout = indexer.get('timeout') or 15

    def __get_params(self, keyword: str, mtype: MediaType = None, page: Optional[int] = 0) -> dict:
        """
        Build the API request parameters
        :param keyword: search keyword
        :param mtype: media type (MOVIE/TV)
        :param page: page number (0-based; the API is 1-based)
        :return: request parameter dict
        """
        params = {
            "page": int(page) + 1,
            "page_size": self._size
        }
        if keyword:
            params["keyword"] = keyword
        # Note: the API's category parameter is a string and only supports a single category.
        # The main category is used here; searching multiple categories would require separate requests.
        if mtype:
            if mtype == MediaType.MOVIE:
                params["category"] = "movie"
            elif mtype == MediaType.TV:
                params["category"] = "tv"
        return params

    def __parse_result(self, results: List[dict]) -> List[dict]:
        """
        Parse search results
        Converts the torrent data returned by the API into MoviePilot's standard format
        :param results: torrent list returned by the API
        :return: list of normalized torrent entries
        """
        torrents = []
        if not results:
            return torrents
        for result in results:
            # Parse category information
            raw_cat = result.get('category')
            cat_val = None
            category = MediaType.UNKNOWN.value
            if isinstance(raw_cat, dict):
                cat_val = raw_cat.get('slug') or raw_cat.get('name')
            elif isinstance(raw_cat, str):
                cat_val = raw_cat
            if cat_val:
                cat_val = str(cat_val).lower()
                if cat_val in ['movie', 'documentary']:
                    category = MediaType.MOVIE.value
                elif cat_val in ['tv', 'animation', 'variety', 'sports']:
                    category = MediaType.TV.value
            # Parse promotion information
            # The API backend already resolves site-wide promotion priority, so the returned promotion data is used as-is
            downloadvolumefactor = 1.0
            uploadvolumefactor = 1.0
            freedate = None
            promotion = result.get('promotion')
            if promotion and promotion.get('is_active'):
                downloadvolumefactor = float(promotion.get('down_multiplier', 1.0))
                uploadvolumefactor = float(promotion.get('up_multiplier', 1.0))
                # Promotion expiry time, normalized to YYYY-MM-DD HH:MM:SS
                if promotion.get('until'):
                    freedate = StringUtils.unify_datetime_str(promotion.get('until'))
            torrent = {
                'title': result.get('title'),
                'description': result.get('subtitle'),
                'enclosure': self.__get_download_url(result.get('id')),
                'pubdate': StringUtils.unify_datetime_str(result.get('created_at')),
                'size': int(result.get('size') or 0),
                'seeders': int(result.get('seeders') or 0),
                'peers': int(result.get('leechers') or 0),
                'grabs': int(result.get('downloads') or 0),
                'downloadvolumefactor': downloadvolumefactor,
                'uploadvolumefactor': uploadvolumefactor,
                'freedate': freedate,
                'page_url': f"https://{self._domain}/torrent/{result.get('uuid')}",
                'labels': [],
                'category': category
            }
            torrents.append(torrent)
        return torrents

    def search(self, keyword: str, mtype: MediaType = None, page: Optional[int] = 0) -> Tuple[bool, List[dict]]:
        """
        Synchronous torrent search
        :param keyword: search keyword
        :param mtype: media type (MOVIE/TV)
        :param page: page number (0-based)
        :return: (whether an error occurred, torrent list)
        """
        if not self._apikey:
            logger.warn(f"{self._name} has no API Key (Passkey) configured")
            return True, []
        params = self.__get_params(keyword, mtype, page)
        headers = {
            "Authorization": f"Bearer {self._apikey}",
            "Accept": "application/json"
        }
        res = RequestUtils(
            headers=headers,
            proxies=self._proxy,
            timeout=self._timeout
        ).get_res(url=self._searchurl, params=params)
        if res and res.status_code == 200:
            try:
                data = res.json()
                if data.get('code') == 0:
                    results = data.get('data', {}).get('torrents', [])
                    return False, self.__parse_result(results)
                else:
                    logger.warn(f"{self._name} search failed, error message: {data.get('message')}")
                    return True, []
            except Exception as e:
                logger.warn(f"{self._name} failed to parse response: {e}")
                return True, []
        elif res is not None:
            logger.warn(f"{self._name} search failed, HTTP status code: {res.status_code}")
            return True, []
        else:
            logger.warn(f"{self._name} search failed, unable to connect to {self._domain}")
            return True, []

    async def async_search(self, keyword: str, mtype: MediaType = None, page: Optional[int] = 0) -> Tuple[bool, List[dict]]:
        """
        Asynchronous torrent search
        :param keyword: search keyword
        :param mtype: media type (MOVIE/TV)
        :param page: page number (0-based)
        :return: (whether an error occurred, torrent list)
        """
        if not self._apikey:
            logger.warn(f"{self._name} has no API Key (Passkey) configured")
            return True, []
        params = self.__get_params(keyword, mtype, page)
        headers = {
            "Authorization": f"Bearer {self._apikey}",
            "Accept": "application/json"
        }
        res = await AsyncRequestUtils(
            headers=headers,
            proxies=self._proxy,
            timeout=self._timeout
        ).get_res(url=self._searchurl, params=params)
        if res and res.status_code == 200:
            try:
                data = res.json()
                if data.get('code') == 0:
                    results = data.get('data', {}).get('torrents', [])
                    return False, self.__parse_result(results)
                else:
                    logger.warn(f"{self._name} search failed, error message: {data.get('message')}")
                    return True, []
            except Exception as e:
                logger.warn(f"{self._name} failed to parse response: {e}")
                return True, []
        elif res is not None:
            logger.warn(f"{self._name} search failed, HTTP status code: {res.status_code}")
            return True, []
        else:
            logger.warn(f"{self._name} search failed, unable to connect to {self._domain}")
            return True, []

    def __get_download_url(self, torrent_id: int) -> str:
        """
        Build the torrent download link
        Uses a base64-encoded request config to tell MoviePilot how to obtain the real download URL:
        MoviePilot first requests the detail endpoint, then extracts data.download_url from the response
        :param torrent_id: torrent ID
        :return: base64-encoded request config string followed by the detail endpoint URL
        """
        url = self._downloadurl % torrent_id
        # MoviePilot parses this special URL format:
        # 1. request the URL with the given method and headers
        # 2. extract the field named by 'result' from the JSON response as the real download URL
        params = {
            'method': 'get',
            'header': {
                'Authorization': f'Bearer {self._apikey}',
                'Accept': 'application/json'
            },
            'result': 'data.download_url'
        }
        base64_str = base64.b64encode(json.dumps(params).encode('utf-8')).decode('utf-8')
        return f"[{base64_str}]{url}"