Add a new feature (not yet working)

This commit is contained in:
EstrellaXD
2022-05-18 20:47:38 +08:00
parent b82602751f
commit f54df0e675
5 changed files with 120 additions and 36 deletions

View File

@@ -1,7 +1,6 @@
# -*- coding: UTF-8 -*-
import sys
import time
import requests
from bs4 import BeautifulSoup
import json
@@ -27,19 +26,21 @@ class CollectRSS:
soup = BeautifulSoup(rss.text, 'xml')
item = soup.find_all('item')
for a in item:
name = str(a.find('title'))
name = re.sub('<title>|</title>', '', name)
parrten = r'\[|\]|\u3010|\u3011|\★|\*|\(|\)|\|\'
name = a.title.string
pattern = r'\[|\]|\u3010|\u3011|\★|\*|\(|\)|\|\|\_'
for i in range(2):
n = re.split(parrten, name)
n = re.split(pattern, name)
try:
name = re.sub(f'\[{n[1]}\]|【{n[1]}】|★{n[1]}', '', name)
except:
name = name
if re.search("\d{2,3}^月", n[1]) is None:
name = re.sub(f'\[{n[1]}\]|【{n[1]}】|★{n[1]}', '', name).strip()
else:
break
except IndexError:
break
for rule in episode_rules:
matchObj = re.match(rule, name, re.I)
if matchObj is not None:
new_name = re.sub(r'\[|\]', '', f'{matchObj.group(1)}')
match_obj = re.match(rule, name, re.I)
if match_obj is not None:
new_name = re.sub(r'\[|\]', '', f'{match_obj.group(1)}')
new_name = re.split(r'/', new_name)[-1].strip()
if new_name not in self.bangumi_title:
self.bangumi_title.append(new_name)

View File

@@ -1 +1,5 @@
qbittorrent_api==2022.4.30
requests~=2.27.1
bs4~=0.0.1
beautifulsoup4~=4.11.1

63
test/rule.json Normal file
View File

@@ -0,0 +1,63 @@
[
{
"group_name": [
"Lilith-Raws",
"NC-Raws",
"Skymoon-Raws",
"天月搬运组",
"LoliHouse",
"ANi",
"❀拨雪寻春❀",
"澄空学园&华盟字幕社"
],
"name_position": [2],
"second_split": true
},
{
"group_name": [
"猎户不鸽发布组",
"NaN-Raws",
"猎户随缘发布组",
"桜都字幕组",
"澄空学园&雪飘工作室",
"千夏字幕组",
"IET字幕组",
"离谱Sub",
"酷漫404",
"星空字幕组",
"轻之国度字幕组",
"枫叶字幕组",
"雪飘工作室",
"豌豆字幕组",
"云光字幕组"
],
"name_position": [2,3],
"second_split": false
},
{
"group_name": [
"喵萌奶茶屋",
"爱恋字幕社",
"诸神字幕组",
"驯兽师联盟",
"夏沐字幕组"
],
"name_position": [3,4],
"second_split": false
},
{
"group_name": [
"漫猫字幕组"
],
"name_position": [4,5],
"second_split": false
},
{
"group_name": [
"幻樱"
],
"name_position": [5,7],
"second_split": false
}
]

View File

@@ -1,27 +1,30 @@
import re
import requests
from bs4 import BeautifulSoup
import json
import os
config_path = "config.json"
info_path = "bangumi.json"


def create_config():
    """Write default ``config.json`` / ``bangumi.json`` beside the script.

    Each file is only written if it does not exist yet.  If either file had
    to be created, the user is asked to fill in the settings and the process
    exits, so the rest of the script never runs with placeholder values.
    (Fix: the original only exited on one of the two missing-file paths.)
    """
    created = False
    if not os.path.exists(config_path):
        # Default qBittorrent connection + RSS settings; the token below is
        # a sample value the user is expected to replace.
        config = {
            "host_ip": "127.0.0.1:8080",
            "user_name": "admin",
            "password": "adminadmin",
            "method": "pn",
            "rss_link": "https://mikanani.me/RSS/MyBangumi?token=qTxKo48gH1SrFNy8X%2fCfQUoeElNsgKNWFNzNieKwBH8%3d",
            "download_path": "/downloads/Bangumi"
        }
        with open(config_path, "w") as c:
            json.dump(config, c, indent=4, separators=(',', ': '), ensure_ascii=False)
        created = True
    if not os.path.exists(info_path):
        # Seed the tracked-bangumi list with a single placeholder entry.
        bangumi_info = [{"title": "simple", "season": ""}]
        with open(info_path, "w") as i:
            json.dump(bangumi_info, i, indent=4, separators=(',', ': '), ensure_ascii=False)
        created = True
    if created:
        print("请填入配置参数")
        quit()


create_config()
# Fetch the global Mikan RSS feed and, for each entry, try to extract the
# series title using the per-release-group rules in rule.json.
# NOTE(review): this region comes from a diff render with +/- markers
# stripped; some adjacent lines may be old AND new versions of the same
# statement — confirm against the repository before relying on it.
url = "https://mikanani.me/RSS/Classic"
# NOTE(review): the second positional argument of requests.get() is
# `params`, so 'utf-8' is sent as a query string, not used as an
# encoding — presumably unintended, confirm.
rss = requests.get(url, 'utf-8')
soup = BeautifulSoup(rss.text, 'xml')   # parse the feed as XML
items = soup.find_all('item')           # one <item> per torrent entry
# Load the per-release-group parsing rules (group names + which split
# positions hold the series name).
with open("rule.json") as f:
    rules = json.load(f)
# Delimiters that split a raw release title into fields.
# NOTE(review): `\|\|` matches a literal "||"; a single `\|` (literal
# pipe) was probably intended — confirm.
split_rule = r"\[|\]|\【|\】|\★|\|\|\(|\)"
bangumi_title = ''
for item in items:
    name = item.title.string
    bangumi_title = ''   # reset the accumulated title for each feed entry
    print(name)
    exit_flag = False    # set once a rule's group matched, to leave the outer loop
    for rule in rules:
        for group in rule["group_name"]:
            if re.search(group, name):
                exit_flag = True
                # Split the raw title and keep only the positions this
                # group's rule designates as the series name.
                n = re.split(split_rule, name)
                for i in rule["name_position"]:
                    bangumi_title = bangumi_title + ' ' + n[i].strip()
                print(bangumi_title)
                break
        if exit_flag:
            break

13
test/test2.py Normal file
View File

@@ -0,0 +1,13 @@
import re
import requests

# Quick connectivity probe against a LAN host: print the body on success,
# a short marker on connection failure, and a note on Ctrl-C.
url = "http://192.168.200.2"
try:
    r = requests.get(url)
    print(r.content)
# Fix: requests raises requests.exceptions.ConnectionError, which derives
# from RequestException/IOError, NOT from the builtin ConnectionError the
# original caught — so a refused/unreachable host crashed the script.
except requests.exceptions.ConnectionError:
    print("e")
except KeyboardInterrupt:
    print("end")