🎨 更新 Python 依赖版本至 3.10,新增 pre-commit 工作流及配置文件

This commit is contained in:
2024-12-13 02:23:38 +08:00
parent 8462830c91
commit 8defcfdd66
38 changed files with 350 additions and 229 deletions

View File

@@ -1,16 +1,16 @@
from . import mg_Info, mg_Introduce, mg_Search
from . import mg_Info
from . import mg_Search
from . import mg_Introduce
# meogirl
async def meogirl():
    """Return the Meogirl plugin's self-introduction text (from mg_Info)."""
    return mg_Info.meogirl()
# Search
async def search(msg: str, num: int = 3):
    """Search Moegirlpedia for *msg*; return up to *num* results as a string."""
    return str(await mg_Search.search(msg, num))
# Show
async def introduce(msg: str):
    """Fetch and return the Moegirlpedia introduction text for *msg*."""
    return str(await mg_Introduce.introduce(msg))

View File

@@ -1,4 +1,3 @@
# Meogirl
def meogirl():
    """Return the fixed (Chinese) self-introduction of the Meogirl plugin."""
    return 'Meogirl指的是"萌娘百科"(https://zh.moegirl.org.cn/ , 简称"萌百"), 是一个"万物皆可萌的百科全书!"; 同时, MarshoTools也配有"Meogirl"插件, 可调用萌百的api'

View File

@@ -1,36 +1,38 @@
from nonebot.log import logger
import re
import httpx
import urllib.parse
import httpx
from bs4 import BeautifulSoup
from nonebot.log import logger
# Browser-like User-Agent: Moegirlpedia rejects requests from bare client UAs.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
}


async def get_async_data(url):
    """GET *url* with the browser-like headers and return the httpx Response.

    timeout=None — Moegirlpedia can respond slowly; callers accept the wait.
    """
    async with httpx.AsyncClient(timeout=None) as client:
        return await client.get(url, headers=headers)
async def introduce(msg: str):
logger.info(f'介绍 : "{msg}" ...')
result = ""
url = "https://mzh.moegirl.org.cn/" + urllib.parse.quote_plus(msg)
response = await get_async_data(url)
logger.success(f"连接\"{url}\"完成, 状态码 : {response.status_code}")
logger.success(f'连接"{url}"完成, 状态码 : {response.status_code}')
soup = BeautifulSoup(response.text, "html.parser")
# 正常页
if response.status_code == 200 :
if response.status_code == 200:
"""
萌娘百科页面结构
div#mw-content-text
└── div#404search # 空白页面出现
└── div.mw-parser-output # 正常页面
└── div, p, table ... # 大量的解释项
萌娘百科页面结构
div#mw-content-text
└── div#404search # 空白页面出现
└── div.mw-parser-output # 正常页面
└── div, p, table ... # 大量的解释项
"""
result += msg + "\n"
@@ -44,7 +46,9 @@ async def introduce (msg : str) :
num = 0
for p_tag in p_tags:
p = str(p_tag)
p = re.sub(r"<script.*?</script>|<style.*?</style>", "", p, flags=re.DOTALL)
p = re.sub(
r"<script.*?</script>|<style.*?</style>", "", p, flags=re.DOTALL
)
p = re.sub(r"<.*?>", "", p, flags=re.DOTALL)
p = re.sub(r"\[.*?]", "", p, flags=re.DOTALL)
@@ -57,20 +61,21 @@ async def introduce (msg : str) :
return result
# 空白页
elif response.status_code == 404 :
logger.info(f"未找到\"{msg}\", 进行搜索")
elif response.status_code == 404:
logger.info(f'未找到"{msg}", 进行搜索')
from . import mg_Search
context = await mg_Search.search(msg, 1)
keyword = re.search(r".*?\n", context, flags = re.DOTALL).group()[: -1]
logger.success(f"搜索完成, 打开\"{keyword}\"")
context = await mg_Search.search(msg, 1)
keyword = re.search(r".*?\n", context, flags=re.DOTALL).group()[:-1]
logger.success(f'搜索完成, 打开"{keyword}"')
return await introduce(keyword)
# 搜索失败
elif response.status_code == 301 :
elif response.status_code == 301:
return f"未找到{msg}"
else :
else:
logger.error(f"网络错误, 状态码 : {response.status_code}")
return f"网络错误, 状态码 : {response.status_code}"

View File

@@ -1,76 +1,85 @@
from nonebot.log import logger
import urllib.parse
import httpx
import urllib.parse
from bs4 import BeautifulSoup
from nonebot.log import logger
# Browser-like User-Agent: Moegirlpedia rejects requests from bare client UAs.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
}


async def get_async_data(url):
    """GET *url* with the browser-like headers and return the httpx Response.

    timeout=None — Moegirlpedia can respond slowly; callers accept the wait.
    """
    async with httpx.AsyncClient(timeout=None) as client:
        return await client.get(url, headers=headers)


async def search(msg: str, num: int):
    """Search Moegirlpedia for *msg* and return up to *num* results as text.

    Each result contributes its page title and a plain-text snippet.
    A 302 response delegates to ``mg_Introduce.introduce``; any other
    non-200 status yields an error string.
    """
    logger.info(f'搜索 : "{msg}" ...')
    result = ""
    url = "https://mzh.moegirl.org.cn/index.php?search=" + urllib.parse.quote_plus(msg)
    response = await get_async_data(url)
    logger.success(f'连接"{url}"完成, 状态码 : {response.status_code}')
    # Normal search results page
    if response.status_code == 200:
        # Moegirlpedia search page structure:
        # div.searchresults
        # └── p ...
        # └── ul.mw-search-results                    # absent => no results
        #     └── li                                  # one search result
        #         └── div.mw-search-result-heading > a  # title
        #         └── div.mw-searchresult               # content snippet
        # └── div.mw-search-result-data
        # └── li ...
        # └── li ...
        soup = BeautifulSoup(response.text, "html.parser")
        # Check ul.mw-search-results — absent means there are no results.
        ul_tag = soup.find("ul", class_="mw-search-results")
        if ul_tag:
            li_tags = ul_tag.find_all("li")
            for li_tag in li_tags:
                div_heading = li_tag.find("div", class_="mw-search-result-heading")
                if div_heading:
                    a_tag = div_heading.find("a")
                    result += a_tag["title"] + "\n"
                    logger.info(f'搜索到 : "{a_tag["title"]}"')
                # NOTE(review): indentation below reconstructed from a diff
                # rendering that stripped leading whitespace — confirm the
                # snippet lookup and counter sit at for-loop level rather
                # than inside the div_heading branch.
                div_result = li_tag.find("div", class_="searchresult")
                if div_result:
                    content = (
                        str(div_result)
                        .replace('<div class="searchresult">', "")
                        .replace("</div>", "")
                    )
                    content = content.replace('<span class="searchmatch">', "").replace(
                        "</span>", ""
                    )
                    result += content + "\n"
                num -= 1
                if num == 0:
                    break
            return result
        # No ul.mw-search-results: nothing found.
        else:
            logger.info("无结果")
            return "无结果"
    # Redirect: the wiki resolved the query to a single page.
    elif response.status_code == 302:
        logger.info(f'"{msg}"已被重定向至"{response.headers.get("location")}"')
        # Follow the redirect by introducing the page instead.
        from . import mg_Introduce

        return await mg_Introduce.introduce(msg)
    else:
        logger.error(f"网络错误, 状态码 : {response.status_code}")
        return f"网络错误, 状态码 : {response.status_code}"

View File

@@ -48,4 +48,4 @@
]
}
}
]
]