增加缓存管理页面

This commit is contained in:
madrays
2025-05-29 00:46:11 +08:00
parent c91ab7a76b
commit 13023141bc

View File

@@ -5,13 +5,14 @@ import tempfile
from collections import deque
from datetime import datetime
from pathlib import Path
from typing import Optional, Union, Annotated
from typing import Optional, Union, Annotated, List
import aiofiles
import pillow_avif # noqa 用于自动注册AVIF支持
from PIL import Image
from fastapi import APIRouter, Depends, HTTPException, Header, Request, Response
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from app import schemas
from app.chain.search import SearchChain
@@ -32,7 +33,7 @@ from app.helper.subscribe import SubscribeHelper
from app.log import logger
from app.monitor import Monitor
from app.scheduler import Scheduler
from app.schemas.types import SystemConfigKey
from app.schemas.types import SystemConfigKey, MediaType
from app.utils.crypto import HashUtils
from app.utils.http import RequestUtils
from app.utils.security import SecurityUtils
@@ -518,3 +519,427 @@ def run_scheduler2(jobid: str,
Scheduler().start(jobid)
return schemas.Response(success=True)
@router.get("/sites/mapping", summary="获取站点域名到名称的映射", response_model=schemas.Response)
def get_sites_mapping(_: User = Depends(get_current_active_superuser)):
    """
    Return the mapping of site domain -> site name.

    :return: Response whose ``data`` is ``{domain: name, ...}`` on success,
             or ``success=False`` with an error message on failure.
    """
    try:
        # Local import to avoid a module-level import cycle with the DB layer
        from app.db.site_oper import SiteOper
        # Dict comprehension replaces the manual accumulate-into-dict loop
        mapping = {site.domain: site.name for site in SiteOper().list()}
        return schemas.Response(success=True, data=mapping)
    except Exception as e:
        logger.error(f"获取站点映射失败:{str(e)}")
        return schemas.Response(success=False, message=f"获取映射失败:{str(e)}")
@router.get("/cache/torrents", summary="获取种子缓存", response_model=schemas.Response)
def get_torrents_cache(_: User = Depends(get_current_active_superuser)):
    """
    Return the current torrent cache data for both the "spider" and "rss" caches.

    :return: Response whose ``data`` has ``spider`` and ``rss`` sections, each with
             ``count`` (total torrents), ``sites`` (number of domains) and ``data``
             (flat list of per-torrent records for the frontend).
    """

    def _context_to_dict(domain: str, context) -> dict:
        # Flatten one cached context (torrent + media + meta info) into a record.
        torrent = context.torrent_info
        media = context.media_info
        meta = context.meta_info
        return {
            # A torrent is identified by md5(title + description)
            "hash": HashUtils.md5(f"{torrent.title}{torrent.description}"),
            "domain": domain,
            "title": torrent.title,
            "description": torrent.description,
            "size": torrent.size,
            "pubdate": torrent.pubdate,
            "site_name": torrent.site_name,
            "media_name": media.title if media else "",
            "media_year": media.year if media else "",
            "media_type": media.type if media else "",
            "season_episode": meta.season_episode if meta else "",
            "resource_term": meta.resource_term if meta else "",
            "enclosure": torrent.enclosure,
            "page_url": torrent.page_url,
            "poster_path": media.get_poster_image() if media else "",
            "backdrop_path": media.get_backdrop_image() if media else ""
        }

    def _cache_section(cache: dict) -> dict:
        # Summarize one cache ({domain: [contexts]}) into counts + flat record list.
        records = [
            _context_to_dict(domain, context)
            for domain, contexts in cache.items()
            for context in contexts
        ]
        return {
            "count": sum(len(contexts) for contexts in cache.values()),
            "sites": len(cache),
            "data": records
        }

    from app.chain.torrents import TorrentsChain
    try:
        torrents_chain = TorrentsChain()
        # Both cache flavours go through the same helper (was duplicated inline)
        return schemas.Response(success=True, data={
            "spider": _cache_section(torrents_chain.get_torrents("spider")),
            "rss": _cache_section(torrents_chain.get_torrents("rss"))
        })
    except Exception as e:
        # Consistent with the other cache endpoints: report, don't 500
        logger.error(f"获取种子缓存失败:{str(e)}")
        return schemas.Response(success=False, message=f"获取失败:{str(e)}")
@router.post("/cache/torrents/refresh", summary="刷新种子缓存", response_model=schemas.Response)
def refresh_torrents_cache(cache_type: str = "auto", _: User = Depends(get_current_active_superuser)):
    """
    Refresh the torrent cache.

    :param cache_type: cache type, one of ``auto`` / ``spider`` / ``rss``
    :return: Response with a summary message of how many sites/torrents were refreshed.
    """
    from app.chain.torrents import TorrentsChain
    chain = TorrentsChain()
    try:
        # "auto" means the chain decides which cache type to refresh
        stype = None if cache_type == "auto" else cache_type
        refreshed = chain.refresh(stype=stype, sites=None)
        torrents_total = sum(len(items) for items in refreshed.values())
        sites_total = len(refreshed)
        return schemas.Response(
            success=True,
            message=f"缓存刷新完成,共刷新 {sites_total} 个站点,{torrents_total} 个种子")
    except Exception as e:
        logger.error(f"刷新种子缓存失败:{str(e)}")
        return schemas.Response(success=False, message=f"刷新失败:{str(e)}")
@router.delete("/cache/torrents", summary="清理种子缓存", response_model=schemas.Response)
def clear_torrents_cache(_: User = Depends(get_current_active_superuser)):
    """
    Clear all torrent caches.

    :return: Response reporting success, or failure with the error message.
    """
    from app.chain.torrents import TorrentsChain
    chain = TorrentsChain()
    try:
        chain.clear_torrents()
    except Exception as e:
        logger.error(f"清理种子缓存失败:{str(e)}")
        return schemas.Response(success=False, message=f"清理失败:{str(e)}")
    return schemas.Response(success=True, message="种子缓存清理完成")
@router.get("/cache/torrents/stats", summary="获取种子缓存统计", response_model=schemas.Response)
def get_torrents_cache_stats(_: User = Depends(get_current_active_superuser)):
    """
    Return torrent cache statistics: cache configuration plus per-site
    counts and the latest publish date for both spider and rss caches.
    """

    def _site_stats(cache: dict) -> list:
        # Per-domain entry: torrent count and most recent pubdate ("" if none).
        # Generator inside max() avoids materializing a throwaway list.
        return [
            {
                "domain": domain,
                "count": len(contexts),
                "latest_date": max(
                    (ctx.torrent_info.pubdate for ctx in contexts if ctx.torrent_info.pubdate),
                    default="")
            }
            for domain, contexts in cache.items()
        ]

    from app.chain.torrents import TorrentsChain
    torrents_chain = TorrentsChain()
    # Cache configuration limits (with the same defaults the cache layer uses)
    cache_limit = settings.CACHE_CONF.get("torrents", 100)
    refresh_limit = settings.CACHE_CONF.get("refresh", 30)
    spider_cache = torrents_chain.get_torrents("spider")
    rss_cache = torrents_chain.get_torrents("rss")
    return schemas.Response(success=True, data={
        "config": {
            "cache_limit": cache_limit,
            "refresh_limit": refresh_limit,
            "current_mode": settings.SUBSCRIBE_MODE
        },
        "spider": {
            "total_count": sum(len(contexts) for contexts in spider_cache.values()),
            "sites_count": len(spider_cache),
            "sites": _site_stats(spider_cache)
        },
        "rss": {
            "total_count": sum(len(contexts) for contexts in rss_cache.values()),
            "sites_count": len(rss_cache),
            "sites": _site_stats(rss_cache)
        }
    })
@router.delete("/cache/torrents/{cache_type}/{domain}/{torrent_hash}", summary="删除指定种子缓存", response_model=schemas.Response)
def delete_torrent_cache(cache_type: str, domain: str, torrent_hash: str,
                         _: User = Depends(get_current_active_superuser)):
    """
    Delete a single cached torrent.

    :param cache_type: cache type, ``spider`` or ``rss``
    :param domain: site domain the torrent was cached under
    :param torrent_hash: torrent hash — md5 of title + description
    """
    from app.chain.torrents import TorrentsChain
    from app.utils.crypto import HashUtils
    chain = TorrentsChain()
    try:
        cache = chain.get_torrents(cache_type)
        if domain not in cache:
            return schemas.Response(success=False, message=f"站点 {domain} 缓存不存在")

        def _hash_of(ctx) -> str:
            # Same hash scheme used when the cache list was produced
            return HashUtils.md5(f"{ctx.torrent_info.title}{ctx.torrent_info.description}")

        before = len(cache[domain])
        cache[domain] = [ctx for ctx in cache[domain] if _hash_of(ctx) != torrent_hash]
        if len(cache[domain]) == before:
            return schemas.Response(success=False, message="未找到指定的种子")
        # Persist to the file matching the cache type.
        # NOTE(review): relies on private TorrentsChain attributes (_spider_file/_rss_file).
        target_file = chain._spider_file if cache_type == "spider" else chain._rss_file
        chain.save_cache(cache, target_file)
        return schemas.Response(success=True, message="种子删除成功")
    except Exception as e:
        logger.error(f"删除种子缓存失败:{str(e)}")
        return schemas.Response(success=False, message=f"删除失败:{str(e)}")
@router.post("/cache/torrents/{cache_type}/{domain}/{torrent_hash}/reidentify", summary="重新识别种子", response_model=schemas.Response)
def reidentify_torrent_cache(cache_type: str, domain: str, torrent_hash: str,
                             tmdbid: Optional[int] = None, doubanid: Optional[str] = None,
                             _: User = Depends(get_current_active_superuser)):
    """
    Re-identify the media info of a cached torrent.

    :param cache_type: cache type, ``spider`` or ``rss``
    :param torrent_hash: torrent hash — md5 of title + description
    :param tmdbid: manually specified TMDB id (takes precedence over doubanid)
    :param doubanid: manually specified Douban id
    :return: Response with the recognized media name/year/type on success.
    """
    # HashUtils and MediaType are already imported at module level — the
    # original redundant local re-imports have been removed.
    from app.chain.torrents import TorrentsChain
    from app.chain.media import MediaChain
    from app.core.metainfo import MetaInfo
    from app.core.context import MediaInfo

    def _manual_media_info(media_chain):
        # Manual identification by id. The id alone does not carry the media
        # type, so try MOVIE first, then TV — same order as the original code.
        if tmdbid:
            info = media_chain.tmdb_info(tmdbid=tmdbid, mtype=MediaType.MOVIE) \
                   or media_chain.tmdb_info(tmdbid=tmdbid, mtype=MediaType.TV)
            if not info:
                return None
            mediainfo = MediaInfo()
            mediainfo.set_tmdb_info(info)
            return mediainfo
        info = media_chain.douban_info(doubanid=doubanid, mtype=MediaType.MOVIE) \
               or media_chain.douban_info(doubanid=doubanid, mtype=MediaType.TV)
        if not info:
            return None
        mediainfo = MediaInfo()
        mediainfo.set_douban_info(info)
        return mediainfo

    def _auto_media_info(media_chain, torrent_info):
        # Automatic recognition from the torrent's title/description.
        meta = MetaInfo(title=torrent_info.title, subtitle=torrent_info.description)
        mediainfo = media_chain.recognize_by_meta(meta)
        if not mediainfo:
            # Fall back to an empty MediaInfo so the cache entry stays valid
            return MediaInfo()
        # Strip bulky data before writing back to the cache
        mediainfo.clear()
        return mediainfo

    torrents_chain = TorrentsChain()
    media_chain = MediaChain()
    try:
        cache_data = torrents_chain.get_torrents(cache_type)
        if domain not in cache_data:
            return schemas.Response(success=False, message=f"站点 {domain} 缓存不存在")
        # Locate the target torrent by its hash
        target_context = next(
            (ctx for ctx in cache_data[domain]
             if HashUtils.md5(f"{ctx.torrent_info.title}{ctx.torrent_info.description}") == torrent_hash),
            None)
        if not target_context:
            return schemas.Response(success=False, message="未找到指定的种子")
        if tmdbid or doubanid:
            mediainfo = _manual_media_info(media_chain)
        else:
            mediainfo = _auto_media_info(media_chain, target_context.torrent_info)
        # Update the context and persist the cache file for this cache type.
        # NOTE(review): relies on private TorrentsChain attributes.
        target_context.media_info = mediainfo
        cache_file = torrents_chain._spider_file if cache_type == "spider" else torrents_chain._rss_file
        torrents_chain.save_cache(cache_data, cache_file)
        return schemas.Response(success=True, message="重新识别完成", data={
            "media_name": mediainfo.title if mediainfo else "",
            "media_year": mediainfo.year if mediainfo else "",
            "media_type": mediainfo.type.value if mediainfo and mediainfo.type else ""
        })
    except Exception as e:
        logger.error(f"重新识别种子失败:{str(e)}")
        return schemas.Response(success=False, message=f"重新识别失败:{str(e)}")
@router.get("/cache/images/stats", summary="获取图片缓存统计", response_model=schemas.Response)
def get_images_cache_stats(_: User = Depends(get_current_active_superuser)):
    """
    Return image cache statistics: number of cached image files, their total
    size in bytes, whether the global image cache is enabled, and the cache path.

    Only files whose suffix is in ``settings.SECURITY_IMAGE_SUFFIXES`` are counted.
    """
    # `Path` is already imported at module level; the redundant local
    # `import os` / `from pathlib import Path` have been removed.
    try:
        images_cache_path = settings.CACHE_PATH / "images"
        if not images_cache_path.exists():
            return schemas.Response(success=True, data={
                "total_files": 0,
                "total_size": 0,
                "cache_enabled": settings.GLOBAL_IMAGE_CACHE
            })
        total_files = 0
        total_size = 0
        # Recursive walk via pathlib instead of os.walk
        for file_path in images_cache_path.rglob("*"):
            if not file_path.is_file():
                continue
            if file_path.suffix.lower() not in settings.SECURITY_IMAGE_SUFFIXES:
                continue
            total_files += 1
            try:
                total_size += file_path.stat().st_size
            except OSError:
                # File vanished or unreadable mid-walk; IOError is an alias of OSError
                continue
        return schemas.Response(success=True, data={
            "total_files": total_files,
            "total_size": total_size,
            "cache_enabled": settings.GLOBAL_IMAGE_CACHE,
            "cache_path": str(images_cache_path)
        })
    except Exception as e:
        logger.error(f"获取图片缓存统计失败:{str(e)}")
        return schemas.Response(success=False, message=f"获取统计失败:{str(e)}")
@router.delete("/cache/images", summary="清理图片缓存", response_model=schemas.Response)
def clear_images_cache(_: User = Depends(get_current_active_superuser)):
    """
    Remove every cached image (``days=0`` clears regardless of file age).

    :return: Response reporting how many files were removed.
    """
    try:
        from app.utils.system import SystemUtils
        cache_dir = settings.CACHE_PATH / "images"
        if not cache_dir.exists():
            return schemas.Response(success=True, message="图片缓存目录不存在")
        removed = SystemUtils.clear(cache_dir, days=0)
        return schemas.Response(success=True, message=f"图片缓存清理完成,清理了 {removed} 个文件")
    except Exception as e:
        logger.error(f"清理图片缓存失败:{str(e)}")
        return schemas.Response(success=False, message=f"清理失败:{str(e)}")
@router.post("/cache/images/clean", summary="清理过期图片缓存", response_model=schemas.Response)
def clean_expired_images_cache(days: int = 7, _: User = Depends(get_current_active_superuser)):
    """
    Remove cached images older than the retention window.

    :param days: retention in days (default 7); older files are removed
    :return: Response reporting how many files were removed.
    """
    try:
        from app.utils.system import SystemUtils
        cache_dir = settings.CACHE_PATH / "images"
        if not cache_dir.exists():
            return schemas.Response(success=True, message="图片缓存目录不存在")
        removed = SystemUtils.clear(cache_dir, days=days)
        return schemas.Response(success=True, message=f"过期图片缓存清理完成,清理了 {removed} 个文件")
    except Exception as e:
        logger.error(f"清理过期图片缓存失败:{str(e)}")
        return schemas.Response(success=False, message=f"清理失败:{str(e)}")