fix:减少无效搜索

This commit is contained in:
jxxghp
2025-08-01 15:18:05 +08:00
parent 0c8fd5121a
commit 104138b9a7
3 changed files with 199 additions and 214 deletions

View File

@@ -1,10 +1,6 @@
import asyncio
import random
import time
from datetime import datetime
from typing import List, Optional, Tuple, Union
from app.core.config import settings
from app.core.context import TorrentInfo
from app.db.site_oper import SiteOper
from app.helper.module import ModuleHelper
@@ -134,48 +130,33 @@ class IndexerModule(_ModuleBase):
await SiteOper().async_success(domain=domain, seconds=seconds)
@staticmethod
def __parse_result(site: dict, result_array: list, search_count: int, seconds: int) -> TorrentInfo:
def __parse_result(site: dict, result_array: list, seconds: int) -> TorrentInfo:
"""
解析搜索结果为 TorrentInfo 对象
"""
def __remove_duplicate(_torrents: List[TorrentInfo]) -> List[TorrentInfo]:
"""
去除重复的种子
:param _torrents: 种子列表
:return: 去重后的种子列表
"""
if not settings.SEARCH_MULTIPLE_NAME:
return _torrents
# 通过 title + description 去重
return list({f"{t.title}_{t.description}": t for t in _torrents}.values())
if not result_array or len(result_array) == 0:
logger.warn(f"{site.get('name')} 未搜索到数据,共搜索 {search_count} 次,耗时 {seconds}")
logger.warn(f"{site.get('name')} 未搜索到数据,耗时 {seconds}")
return []
else:
logger.info(
f"{site.get('name')} 搜索完成,共搜索 {search_count} 次,耗时 {seconds} 秒,返回数据:{len(result_array)}")
torrents = [TorrentInfo(site=site.get("id"),
site_name=site.get("name"),
site_cookie=site.get("cookie"),
site_ua=site.get("ua"),
site_proxy=site.get("proxy"),
site_order=site.get("pri"),
site_downloader=site.get("downloader"),
**result) for result in result_array]
# 去重
return __remove_duplicate(torrents)
logger.info(
f"{site.get('name')} 搜索完成,耗时 {seconds} 秒,返回数据:{len(result_array)}")
return [TorrentInfo(site=site.get("id"),
site_name=site.get("name"),
site_cookie=site.get("cookie"),
site_ua=site.get("ua"),
site_proxy=site.get("proxy"),
site_order=site.get("pri"),
site_downloader=site.get("downloader"),
**result) for result in result_array]
def search_torrents(self, site: dict,
keywords: List[str] = None,
keyword: str = None,
mtype: MediaType = None,
cat: Optional[str] = None,
page: Optional[int] = 0) -> List[TorrentInfo]:
"""
搜索一个站点
:param site: 站点
:param keywords: 搜索关键词列表
:param keyword: 搜索关键词
:param mtype: 媒体类型
:param cat: 分类
:param page: 页码
@@ -188,79 +169,59 @@ class IndexerModule(_ModuleBase):
start_time = datetime.now()
# 错误标志
error_flag = False
# 搜索次数
search_count = 0
for search_word in keywords or ['']:
# 检查是否可以执行搜索
if not self.__search_check(site, search_word):
continue
# 强制休眠 1-10 秒
if search_count > 0:
logger.info(f"站点 {site.get('name')} 已搜索 {search_count} 次,强制休眠 1-10 秒 ...")
time.sleep(random.randint(1, 10))
# 检查是否可以执行搜索
if not self.__search_check(site, keyword):
return []
# 去除搜索关键字中的特殊字符
search_word = self.__clear_search_text(search_word)
# 去除搜索关键字中的特殊字符
search_word = self.__clear_search_text(keyword)
# 开始搜索
try:
if site.get('parser') == "TNodeSpider":
error_flag, result = TNodeSpider(site).search(
keyword=search_word,
page=page
)
elif site.get('parser') == "TorrentLeech":
error_flag, result = TorrentLeech(site).search(
keyword=search_word,
page=page
)
elif site.get('parser') == "mTorrent":
error_flag, result = MTorrentSpider(site).search(
keyword=search_word,
mtype=mtype,
page=page
)
elif site.get('parser') == "Yema":
error_flag, result = YemaSpider(site).search(
keyword=search_word,
mtype=mtype,
page=page
)
elif site.get('parser') == "Haidan":
error_flag, result = HaiDanSpider(site).search(
keyword=search_word,
mtype=mtype
)
elif site.get('parser') == "HDDolby":
error_flag, result = HddolbySpider(site).search(
keyword=search_word,
mtype=mtype,
page=page
)
else:
error_flag, result = self.__spider_search(
search_word=search_word,
indexer=site,
mtype=mtype,
cat=cat,
page=page
)
if error_flag:
break
if not result:
continue
if settings.SEARCH_MULTIPLE_NAME:
# 合并多个结果
result_array.extend(result)
else:
# 有结果就停止
result_array = result
break
except Exception as err:
logger.error(f"{site.get('name')} 搜索出错:{str(err)}")
finally:
search_count += 1
# 开始搜索
try:
if site.get('parser') == "TNodeSpider":
error_flag, result = TNodeSpider(site).search(
keyword=search_word,
page=page
)
elif site.get('parser') == "TorrentLeech":
error_flag, result = TorrentLeech(site).search(
keyword=search_word,
page=page
)
elif site.get('parser') == "mTorrent":
error_flag, result = MTorrentSpider(site).search(
keyword=search_word,
mtype=mtype,
page=page
)
elif site.get('parser') == "Yema":
error_flag, result = YemaSpider(site).search(
keyword=search_word,
mtype=mtype,
page=page
)
elif site.get('parser') == "Haidan":
error_flag, result = HaiDanSpider(site).search(
keyword=search_word,
mtype=mtype
)
elif site.get('parser') == "HDDolby":
error_flag, result = HddolbySpider(site).search(
keyword=search_word,
mtype=mtype,
page=page
)
else:
error_flag, result = self.__spider_search(
search_word=search_word,
indexer=site,
mtype=mtype,
cat=cat,
page=page
)
except Exception as err:
logger.error(f"{site.get('name')} 搜索出错:{str(err)}")
# 索引花费的时间
seconds = (datetime.now() - start_time).seconds
@@ -272,19 +233,18 @@ class IndexerModule(_ModuleBase):
return self.__parse_result(
site=site,
result_array=result_array,
search_count=search_count,
seconds=seconds
)
async def async_search_torrents(self, site: dict,
keywords: List[str] = None,
keyword: str = None,
mtype: MediaType = None,
cat: Optional[str] = None,
page: Optional[int] = 0) -> List[TorrentInfo]:
"""
异步搜索一个站点
:param site: 站点
:param keywords: 搜索关键词列表
:param keyword: 搜索关键词
:param mtype: 媒体类型
:param cat: 分类
:param page: 页码
@@ -297,82 +257,59 @@ class IndexerModule(_ModuleBase):
start_time = datetime.now()
# 错误标志
error_flag = False
# 搜索次数
search_count = 0
# 遍历搜索关键字
for search_word in keywords or ['']:
# 检查是否可以执行搜索
if not self.__search_check(site, search_word):
continue
# 强制休眠 1-10 秒
if search_count > 0:
logger.info(f"站点 {site.get('name')} 已搜索 {search_count} 次,强制休眠 1-10 秒 ...")
await asyncio.sleep(random.randint(1, 10))
# 检查是否可以执行搜索
if not self.__search_check(site, keyword):
return []
# 去除搜索关键字中的特殊字符
search_word = self.__clear_search_text(search_word)
# 去除搜索关键字中的特殊字符
search_word = self.__clear_search_text(keyword)
# 开始搜索
try:
if site.get('parser') == "TNodeSpider":
error_flag, result = await TNodeSpider(site).async_search(
keyword=search_word,
page=page
)
elif site.get('parser') == "TorrentLeech":
error_flag, result = await TorrentLeech(site).async_search(
keyword=search_word,
page=page
)
elif site.get('parser') == "mTorrent":
error_flag, result = await MTorrentSpider(site).async_search(
keyword=search_word,
mtype=mtype,
page=page
)
elif site.get('parser') == "Yema":
error_flag, result = await YemaSpider(site).async_search(
keyword=search_word,
mtype=mtype,
page=page
)
elif site.get('parser') == "Haidan":
error_flag, result = await HaiDanSpider(site).async_search(
keyword=search_word,
mtype=mtype
)
elif site.get('parser') == "HDDolby":
error_flag, result = await HddolbySpider(site).async_search(
keyword=search_word,
mtype=mtype,
page=page
)
else:
error_flag, result = await self.__async_spider_search(
search_word=search_word,
indexer=site,
mtype=mtype,
cat=cat,
page=page
)
if error_flag:
break
if not result:
continue
if settings.SEARCH_MULTIPLE_NAME:
# 合并多个结果
result_array.extend(result)
else:
# 有结果就停止
result_array = result
break
except Exception as err:
logger.error(f"{site.get('name')} 搜索出错:{str(err)}")
finally:
search_count += 1
# 开始搜索
try:
if site.get('parser') == "TNodeSpider":
error_flag, result = await TNodeSpider(site).async_search(
keyword=search_word,
page=page
)
elif site.get('parser') == "TorrentLeech":
error_flag, result = await TorrentLeech(site).async_search(
keyword=search_word,
page=page
)
elif site.get('parser') == "mTorrent":
error_flag, result = await MTorrentSpider(site).async_search(
keyword=search_word,
mtype=mtype,
page=page
)
elif site.get('parser') == "Yema":
error_flag, result = await YemaSpider(site).async_search(
keyword=search_word,
mtype=mtype,
page=page
)
elif site.get('parser') == "Haidan":
error_flag, result = await HaiDanSpider(site).async_search(
keyword=search_word,
mtype=mtype
)
elif site.get('parser') == "HDDolby":
error_flag, result = await HddolbySpider(site).async_search(
keyword=search_word,
mtype=mtype,
page=page
)
else:
error_flag, result = await self.__async_spider_search(
search_word=search_word,
indexer=site,
mtype=mtype,
cat=cat,
page=page
)
except Exception as err:
logger.error(f"{site.get('name')} 搜索出错:{str(err)}")
# 索引花费的时间
seconds = (datetime.now() - start_time).seconds
@@ -384,7 +321,6 @@ class IndexerModule(_ModuleBase):
return self.__parse_result(
site=site,
result_array=result_array,
search_count=search_count,
seconds=seconds
)