diff --git a/.gitignore b/.gitignore
index 3adf728b..ff0573f4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -163,5 +163,5 @@ cython_debug/
/auto_bangumi/const_dev.py
/config/bangumi.json
/auto_bangumi/tester.py
-/source/names.txt
+/resource/names.txt
diff --git a/.idea/Bangumi_Auto_Rename.iml b/.idea/Bangumi_Auto_Rename.iml
index 25d4c69a..9ac0f5af 100644
--- a/.idea/Bangumi_Auto_Rename.iml
+++ b/.idea/Bangumi_Auto_Rename.iml
@@ -4,7 +4,7 @@
-
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
index dc9ea490..37b833f1 100644
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -1,4 +1,4 @@
-
+
\ No newline at end of file
diff --git a/auto_bangumi/app.py b/auto_bangumi/app.py
index 2d7be96e..35c111f9 100644
--- a/auto_bangumi/app.py
+++ b/auto_bangumi/app.py
@@ -29,6 +29,7 @@ def load_data_file():
if bangumi_data["data_version"] != settings.data_version or bangumi_data["rss_link"] != settings.rss_link:
bangumi_data["bangumi_info"] = []
bangumi_data["rss_link"] = settings.rss_link
+ logger.info("Rebuilding data information...")
return bangumi_data
diff --git a/auto_bangumi/core/download_client.py b/auto_bangumi/core/download_client.py
index 384c442f..27e2c70a 100644
--- a/auto_bangumi/core/download_client.py
+++ b/auto_bangumi/core/download_client.py
@@ -114,5 +114,13 @@ class DownloadClient:
if __name__ == "__main__":
- put = DownloadClient()
- put.add_rules()
+ try:
+ from const_dev import DEV_SETTINGS
+ except ModuleNotFoundError:
+        logger.debug("`const_dev.py` not found; please provide a `const_dev.py` to use custom settings")
+ settings.init(DEV_SETTINGS)
+ client = getClient()
+ try:
+ client.rss_remove_item(item_path="Mikan_RSS")
+ except ConflictError:
+ logger.info("No feed exists, start adding feed.")
diff --git a/auto_bangumi/core/eps_complete.py b/auto_bangumi/core/eps_complete.py
index ca2541bd..595f2fa8 100644
--- a/auto_bangumi/core/eps_complete.py
+++ b/auto_bangumi/core/eps_complete.py
@@ -33,7 +33,7 @@ class FullSeasonGet:
season = ""
else:
season = self.season
- search_str = re.sub(r"[& ]", "+",
+ search_str = re.sub(r"[\W_]", "+",
f"{self.group} {self.bangumi_name} {season} {self.subtitle} {self.source} {self.dpi}")
season = requests.get(
f"https://mikanani.me/RSS/Search?searchstr={search_str}"
diff --git a/auto_bangumi/downloader/qb_downloader.py b/auto_bangumi/downloader/qb_downloader.py
index 22795a72..397136e3 100644
--- a/auto_bangumi/downloader/qb_downloader.py
+++ b/auto_bangumi/downloader/qb_downloader.py
@@ -63,7 +63,18 @@ class QbDownloader:
self._client.rss_remove_item(item_path)
except Conflict409Error as e:
logger.exception(e)
+ logger.info("Add new RSS")
raise ConflictError()
def rss_set_rule(self, rule_name, rule_def):
self._client.rss_set_rule(rule_name, rule_def)
+
+
+if __name__ == "__main__":
+ try:
+ from const_dev import DEV_SETTINGS
+ except ModuleNotFoundError:
+        logger.debug("`const_dev.py` not found; please provide a `const_dev.py` to use custom settings")
+ settings.init(DEV_SETTINGS)
+ client = QbDownloader(settings.host_ip, settings.user_name, settings.password)
+ client.rss_remove_item("Mikan_RSS")
diff --git a/requirements.txt b/requirements.txt
index 7cbbf9ce..ba37918e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,5 +2,4 @@ qbittorrent-api
bs4
requests
lxml
-zhconv
diff --git a/resource/anidb.py b/resource/anidb.py
new file mode 100644
index 00000000..8902216b
--- /dev/null
+++ b/resource/anidb.py
@@ -0,0 +1,76 @@
+#! /usr/bin/python
+import re
+import requests
+from bs4 import BeautifulSoup
+from utils import json_config
+
+header = {
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ApplewebKit/537.36 (KHtml, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
+}
+
+
+def get_html(url):
+ requests.adapters.DEFAULT_RETRIES = 5 # 增加重连次数
+ s = requests.session()
+ s.keep_alive = False # 关闭多余连接
+ html = s.get(url=url, headers=header).text
+ print("get html success")
+ return html
+
+
+def get_list(year, season):
+ season = ["spring", "summer", "autumn", "winter"][season - 1]
+ url = "https://anidb.net/anime/season/%s/%s/" % (year, season)
+ html = get_html(url)
+    ids = re.findall(r'href="/anime/(\d+)"', html)  # NOTE(review): original pattern lost in transit; verify against anidb season-page markup
+ return ids
+
+
+def get_title(id):
+ url = "https://anidb.net/anime/%s" % id
+ soup = BeautifulSoup(get_html(url), "lxml")
+ titles = soup.find("div", id="tab_2_pane")
+ g = titles.findAll("th")
+ v = titles.findAll("td")
+ t_dic = {
+ "id": id,
+ "main": None,
+ "verified": None,
+ "en": None,
+ "chs": None,
+ "cht": None,
+ "jp": None,
+ "synonym": None,
+ "kana": None
+ }
+ for i in range(0, len(g)):
+ if g[i].text == "Main Title":
+ t_dic["main"] = re.sub("\(a\d+\)", "", v[i].text).strip("\n\t")
+ elif g[i].text == "Official Title":
+ if re.search("verified", str(v[i])):
+ t_dic["verified"] = v[i].find("label").text
+ if re.search("language: english", str(v[i])):
+ t_dic["en"] = v[i].find("label").text
+ elif re.search("span>zh-Hant", str(v[i])):
+ t_dic["cht"] = v[i].find("label").text
+ elif re.search("span>zh-Hans", str(v[i])):
+ t_dic["chs"] = v[i].find("label").text
+ elif re.search("language: japanese", str(v[i])):
+ t_dic["jp"] = v[i].find("label").text
+ elif g[i].text == "Synonym":
+ t_dic["synonym"] = v[i].text
+ elif g[i].text == "Kana":
+ t_dic["kana"] = v[i].text
+ return t_dic
+
+if __name__ == "__main__":
+ print("start")
+ # 年份,季度
+ id_list = (get_list(2022, 1))
+ for i in id_list:
+ url = f"http://api.anidb.net:9001/httpapi?request=anime&client=autobangumi&clientver=1&protover=1&aid={i}"
+ req = requests.get(url)
+ soup = BeautifulSoup(req.text, "xml")
+ titles = soup.find("titles")["official"]
+ for item in titles:
+ print(item)
diff --git a/source/names.txt b/resource/names.txt
similarity index 100%
rename from source/names.txt
rename to resource/names.txt