perf: comprehensive performance optimization for backend and frontend

Backend: shared HTTP connection pool, concurrent RSS/torrent/notification
operations, TMDB/Mikan result caching, database indexes, pre-compiled
regex, __slots__ on dataclasses, O(1) set-based dedup, frozenset lookups,
batch RSS enable/disable, asyncio.to_thread for blocking calls.

Frontend: shallowRef for large arrays, computed table columns, watch
instead of watchEffect, scoped style fix, typed emits, noImplicitAny,
useIntervalFn lifecycle management, shared useClipboard instance,
shallow clone for shared template objects.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Author: Estrella Pan
Date:   2026-01-24 20:46:45 +01:00
Parent: 929a88c343
Commit: cba4988e52
38 changed files with 409 additions and 262 deletions

View File

@@ -1,3 +1,4 @@
import logging
import re
from bs4 import BeautifulSoup
@@ -6,8 +7,16 @@ from urllib3.util import parse_url
from module.network import RequestContent
from module.utils import save_image
logger = logging.getLogger(__name__)
# In-memory cache for Mikan homepage lookups
_mikan_cache: dict[str, tuple[str, str]] = {}
async def mikan_parser(homepage: str):
if homepage in _mikan_cache:
logger.debug(f"[Mikan] Cache hit for {homepage}")
return _mikan_cache[homepage]
root_path = parse_url(homepage).host
async with RequestContent() as req:
content = await req.get_html(homepage)
@@ -23,8 +32,12 @@ async def mikan_parser(homepage: str):
img = await req.get_content(f"https://{root_path}{poster_path}")
suffix = poster_path.split(".")[-1]
poster_link = save_image(img, suffix)
return poster_link, official_title
return "", ""
result = (poster_link, official_title)
_mikan_cache[homepage] = result
return result
result = ("", "")
_mikan_cache[homepage] = result
return result
if __name__ == '__main__':

View File

@@ -1,3 +1,4 @@
import logging
import re
import time
from dataclasses import dataclass
@@ -6,8 +7,13 @@ from module.conf import TMDB_API
from module.network import RequestContent
from module.utils import save_image
logger = logging.getLogger(__name__)
TMDB_URL = "https://api.themoviedb.org"
# In-memory cache for TMDB lookups to avoid repeated API calls
_tmdb_cache: dict[str, "TMDBInfo | None"] = {}
@dataclass
class TMDBInfo:
@@ -57,6 +63,11 @@ def get_season(seasons: list) -> tuple[int, str]:
async def tmdb_parser(title, language, test: bool = False) -> TMDBInfo | None:
cache_key = f"{title}:{language}"
if cache_key in _tmdb_cache:
logger.debug(f"[TMDB] Cache hit for {title}")
return _tmdb_cache[cache_key]
async with RequestContent() as req:
url = search_url(title)
contents = await req.get_json(url)
@@ -99,7 +110,7 @@ async def tmdb_parser(title, language, test: bool = False) -> TMDBInfo | None:
poster_link = "https://image.tmdb.org/t/p/w780" + poster_path
else:
poster_link = None
return TMDBInfo(
result = TMDBInfo(
id,
official_title,
original_title,
@@ -108,7 +119,10 @@ async def tmdb_parser(title, language, test: bool = False) -> TMDBInfo | None:
str(year_number),
poster_link,
)
_tmdb_cache[cache_key] = result
return result
else:
_tmdb_cache[cache_key] = None
return None

View File

@@ -16,6 +16,8 @@ RULES = [
r"(.*)(?:S\d{2})?EP?(\d{1,4}(?:\.\d{1,2})?)(.*)",
]
COMPILED_RULES = [re.compile(rule, re.I) for rule in RULES]
SUBTITLE_LANG = {
"zh-tw": ["tc", "cht", "", "zh-tw"],
"zh": ["sc", "chs", "", "zh"],
@@ -34,10 +36,11 @@ def get_path_basename(torrent_path: str) -> str:
return Path(torrent_path).name
_GROUP_SPLIT_RE = re.compile(r"[\[\]()【】()]")
def get_group(group_and_title) -> tuple[str | None, str]:
n = re.split(r"[\[\]()【】()]", group_and_title)
while "" in n:
n.remove("")
n = [x for x in _GROUP_SPLIT_RE.split(group_and_title) if x]
if len(n) > 1:
if re.match(r"\d+", n[1]):
return None, group_and_title
@@ -73,8 +76,8 @@ def torrent_parser(
if torrent_name is None:
match_names = match_names[1:]
for match_name in match_names:
for rule in RULES:
match_obj = re.match(rule, match_name, re.I)
for compiled_rule in COMPILED_RULES:
match_obj = compiled_rule.match(match_name)
if match_obj:
group, title = get_group(match_obj.group(1))
if not season: