mirror of https://github.com/LiteyukiStudio/nonebot-plugin-marshoai.git
synced 2025-12-24 11:16:41 +00:00

Compare commits
5 Commits
snowykami-...feat/strea
| Author | SHA1 | Date |
|---|---|---|
| | d75b468330 | |
| | 3fa1be27bc | |
| | 52d218fb8d | |
| | 35beecb819 | |
| | 780df08a65 | |
.github/workflows/docs-build.yml (vendored, 10 changes)
@@ -1,6 +1,9 @@
 name: Deploy VitePress site to Liteyuki PaaS
 
-on: ["push", "pull_request_target"]
+on:
+  push:
+    branches: [main]
+  workflow_dispatch:
 
 permissions:
   contents: write
@@ -25,7 +28,7 @@ jobs:
       - name: Setup Python
         uses: actions/setup-python@v2
         with:
-          python-version: "3.11"
+          python-version: '3.11'
 
       - name: Setup API markdown
         run: |-
@@ -49,10 +52,11 @@ jobs:
         run: |-
           pnpm run docs:build
 
+
       - name: "发布"
         run: |
           npx -p "@getmeli/cli" meli upload docs/.vitepress/dist \
-            --url "https://dash.apage.dev" \
+            --url "https://meli.liteyuki.icu" \
             --site "$MELI_SITE" \
            --token "$MELI_TOKEN" \
            --release "$GITHUB_SHA"
@@ -1,6 +1,6 @@
 <!--suppress LongLine -->
 <div align="center">
-<a href="https://marshoai-docs.pages.liteyuki.icu"><img src="https://marshoai-docs.pages.liteyuki.icu/marsho-full.svg" width="800" height="430" alt="MarshoLogo"></a>
+<a href="https://marshoai-docs.meli.liteyuki.icu"><img src="https://marshoai-docs.meli.liteyuki.icu/marsho-full.svg" width="800" height="430" alt="MarshoLogo"></a>
 <br>
 </div>
 
@@ -48,7 +48,7 @@ _谁不喜欢回复消息快又可爱的猫娘呢?_
 
 ## 😼 使用
 
-请查看[使用文档](https://marshoai-docs.pages.liteyuki.icu/start/use.html)
+请查看[使用文档](https://marshoai-docs.meli.liteyuki.icu/start/use)
 
 ## ❤ 鸣谢&版权说明
 
@@ -1,6 +1,6 @@
 <!--suppress LongLine -->
 <div align="center">
-<a href="https://marshoai-docs.pages.liteyuki.icu"><img src="https://marshoai-docs.pages.liteyuki.icu/marsho-full.svg" width="800" height="430" alt="MarshoLogo"></a>
+<a href="https://marshoai-docs.meli.liteyuki.icu"><img src="https://marshoai-docs.meli.liteyuki.icu/marsho-full.svg" width="800" height="430" alt="MarshoLogo"></a>
 <br>
 </div>
 
@@ -48,7 +48,7 @@ Plugin internally installed the catgirl character of Marsho, is able to have a c
 - 🐾 Play! I like play with friends!
 
 ## 😼 Usage
-Please read [Documentation](https://marshoai-docs.pages.liteyuki.icu/start/use.html)
+Please read [Documentation](https://marshoai-docs.meli.liteyuki.icu/start/install)
 
 ## ❤ Thanks&Copyright
 This project uses the following code from other projects:
@@ -8,13 +8,12 @@ import { generateSidebar } from 'vitepress-sidebar'
 // https://vitepress.dev/reference/site-config
 export default defineConfig({
   head: [
-    ["script", { src: "https://cdn.liteyuki.icu/js/liteyuki_footer.js" }],
     ['link', { rel: 'icon', type: 'image/x-icon', href: '/favicon.ico' }],
   ],
   rewrites: {
     [`${defaultLang}/:rest*`]: ":rest*",
   },
-  cleanUrls: false,
+  cleanUrls: true,
   themeConfig: {
     // https://vitepress.dev/reference/default-theme-config
     logo: {
@@ -65,7 +65,7 @@ When nonebot linked to OneBot v11 adapter, can recieve double click and response
 MarshoTools is a feature added in `v0.5.0`, support loading external function library to provide Function Call for Marsho.
 
 ## 🧩 Marsho Plugin
-Marsho Plugin is a feature added in `v1.0.0`, replacing the old MarshoTools feature. [Documentation](https://marshoai-docs.pages.liteyuki.icu/dev/extension)
+Marsho Plugin is a feature added in `v1.0.0`, replacing the old MarshoTools feature. [Documentation](https://marshoai-docs.meli.liteyuki.icu/dev/extension)
 
 ## 👍 Praise list
 
@@ -68,7 +68,7 @@ GitHub Models API 的限制较多,不建议使用,建议通过修改`MARSHOA
 
 ## 🧩 小棉插件
 
-小棉插件是`v1.0.0`的新增功能,替代旧的小棉工具功能。[使用文档](https://marshoai-docs.pages.liteyuki.icu/dev/extension)
+小棉插件是`v1.0.0`的新增功能,替代旧的小棉工具功能。[使用文档](https://marshoai-docs.meli.liteyuki.icu/dev/extension)
 
 ## 👍 夸赞名单
 
@@ -37,7 +37,7 @@ OPENAI_NEW_MODELS: list = [
 INTRODUCTION: str = f"""MarshoAI-NoneBot by LiteyukiStudio
 你好喵~我是一只可爱的猫娘AI,名叫小棉~🐾!
 我的主页在这里哦~↓↓↓
-https://marshoai-docs.pages.liteyuki.icu
+https://marshoai-docs.meli.liteyuki.icu
 
 ※ 使用 「{config.marshoai_default_name}.status」命令获取状态信息。
 ※ 使用「{config.marshoai_default_name}.help」命令获取使用说明。"""
@@ -17,9 +17,10 @@ from nonebot.matcher import (
     current_event,
     current_matcher,
 )
-from nonebot_plugin_alconna.uniseg import UniMessage, UniMsg, get_message_id, get_target
+from nonebot_plugin_alconna.uniseg import UniMessage, UniMsg
 from openai import AsyncOpenAI, AsyncStream
 from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
+from openai.types.chat.chat_completion import Choice
 
 from .config import config
 from .constants import SUPPORT_IMAGE_MODELS
@@ -35,7 +36,6 @@ from .util import (
     make_chat_openai,
     parse_richtext,
 )
-from .utils.processor import process_chat_stream
 
 
 class MarshoHandler:
@@ -51,7 +51,7 @@ class MarshoHandler:
         # self.state: T_State = current_handler.get().state
         self.matcher: Matcher = current_matcher.get()
         self.message_id: str = UniMessage.get_message_id(self.event)
-        self.target = get_target(self.event)
+        self.target = UniMessage.get_target(self.event)
 
     async def process_user_input(
         self, user_input: UniMsg, model_name: str
@@ -103,7 +103,7 @@ class MarshoHandler:
         处理单条聊天
         """
 
-        context_msg = await get_prompt(model_name) + (
+        context_msg = get_prompt(model_name) + (
             self.context.build(self.target.id, self.target.private)
         )
         response = await make_chat_openai(
@@ -117,10 +117,10 @@
 
     async def handle_function_call(
         self,
-        completion: Union[ChatCompletion],
+        completion: Union[ChatCompletion, AsyncStream[ChatCompletionChunk]],
         user_message: Union[str, list],
         model_name: str,
-        tools_list: list | None = None,
+        tools_list: list,
     ):
         # function call
         # 需要获取额外信息,调用函数工具
@@ -188,7 +188,7 @@
         self,
         user_message: Union[str, list],
         model_name: str,
-        tools_list: list | None = None,
+        tools_list: list,
         stream: bool = False,
         tool_message: Optional[list] = None,
     ) -> Optional[Tuple[UserMessage, ChatCompletionMessage]]:
@@ -210,7 +210,10 @@
             tools_list=tools_list,
             tool_message=tool_message,
         )
-        choice = response.choices[0]  # type: ignore
+        if isinstance(response, ChatCompletion):
+            choice = response.choices[0]
+        else:
+            raise ValueError("Unexpected response type")
         # Sprint(choice)
         # 当tool_calls非空时,将finish_reason设置为TOOL_CALLS
         if choice.message.tool_calls is not None and config.marshoai_fix_toolcalls:
@@ -257,9 +260,9 @@
         self,
         user_message: Union[str, list],
         model_name: str,
-        tools_list: list | None = None,
+        tools_list: list,
         tools_message: Optional[list] = None,
-    ) -> ChatCompletion:
+    ) -> Union[ChatCompletion, None]:
         """
         处理流式请求
         """
@@ -272,6 +275,54 @@
         )
 
-        if isinstance(response, AsyncStream):
-            return await process_chat_stream(response)
-        else:
-            raise TypeError("Unexpected response type for stream request")
+        reasoning_contents = ""
+        answer_contents = ""
+        last_chunk = None
+        is_first_token_appeared = False
+        is_answering = False
+        async for chunk in response:
+            last_chunk = chunk
+            # print(chunk)
+            if not is_first_token_appeared:
+                logger.debug(f"{chunk.id}: 第一个 token 已出现")
+                is_first_token_appeared = True
+            if not chunk.choices:
+                logger.info("Usage:", chunk.usage)
+            else:
+                delta = chunk.choices[0].delta
+                if (
+                    hasattr(delta, "reasoning_content")
+                    and delta.reasoning_content is not None
+                ):
+                    reasoning_contents += delta.reasoning_content
+                else:
+                    if not is_answering:
+                        logger.debug(
+                            f"{chunk.id}: 思维链已输出完毕或无 reasoning_content 字段输出"
+                        )
+                        is_answering = True
+                    if delta.content is not None:
+                        answer_contents += delta.content
+
+        # print(last_chunk)
+        # 创建新的 ChatCompletion 对象
+        if last_chunk and last_chunk.choices:
+            message = ChatCompletionMessage(
+                content=answer_contents,
+                role="assistant",
+                tool_calls=last_chunk.choices[0].delta.tool_calls,  # type: ignore
+            )
+            choice = Choice(
+                finish_reason=last_chunk.choices[0].finish_reason,  # type: ignore
+                index=last_chunk.choices[0].index,
+                message=message,
+            )
+            return ChatCompletion(
+                id=last_chunk.id,
+                choices=[choice],
+                created=last_chunk.created,
+                model=last_chunk.model,
+                system_fingerprint=last_chunk.system_fingerprint,
+                object="chat.completion",
+                usage=last_chunk.usage,
+            )
+        return None
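The hunk above inlines what the former `process_chat_stream` helper did: deltas from each streamed chunk are accumulated and only folded back into a single `ChatCompletion`-shaped result once the stream ends. For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of the same delta accumulation; the `Chunk`/`Delta` classes are stand-ins for illustration, not the OpenAI SDK models or the plugin's actual code.

```python
# Minimal sketch of delta accumulation over a chunk stream.
# Chunk/Delta are hypothetical stand-ins, not the openai SDK types.
import asyncio
from dataclasses import dataclass
from typing import AsyncIterator, Optional


@dataclass
class Delta:
    content: Optional[str] = None
    reasoning_content: Optional[str] = None


@dataclass
class Chunk:
    delta: Delta


async def accumulate(stream: AsyncIterator[Chunk]) -> tuple[str, str]:
    """Collect reasoning text and answer text from streamed deltas."""
    reasoning, answer = "", ""
    async for chunk in stream:
        if chunk.delta.reasoning_content is not None:
            reasoning += chunk.delta.reasoning_content
        elif chunk.delta.content is not None:
            answer += chunk.delta.content
    return reasoning, answer


async def _demo() -> None:
    async def fake_stream() -> AsyncIterator[Chunk]:
        for text in ("Hel", "lo", "~"):
            yield Chunk(Delta(content=text))

    print(await accumulate(fake_stream()))  # ('', 'Hello~')


if __name__ == "__main__":
    asyncio.run(_demo())
```

The handler additionally keeps the last chunk so it can rebuild a `ChatCompletion` carrying the original `finish_reason`, `tool_calls`, and `usage` fields.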
@@ -15,14 +15,7 @@ from nonebot.params import CommandArg
 from nonebot.permission import SUPERUSER
 from nonebot.rule import to_me
 from nonebot.typing import T_State
-from nonebot_plugin_alconna import (
-    Emoji,
-    MsgTarget,
-    UniMessage,
-    UniMsg,
-    message_reaction,
-    on_alconna,
-)
+from nonebot_plugin_alconna import MsgTarget, UniMessage, UniMsg, on_alconna
 
 from .config import config
 from .constants import INTRODUCTION, SUPPORT_IMAGE_MODELS
@@ -32,7 +25,6 @@ from .instances import client, context, model_name, target_list, tools
 from .metadata import metadata
 from .plugin.func_call.caller import get_function_calls
 from .util import *
-from .utils.processor import process_chat_stream
 
 
 async def at_enable():
@@ -234,7 +226,6 @@ async def marsho(
     if not text:
         # 发送说明
         # await UniMessage(metadata.usage + "\n当前使用的模型:" + model_name).send()
-        await message_reaction(Emoji("38"))
         await marsho_cmd.finish(INTRODUCTION)
     backup_context = await get_backup_context(target.id, target.private)
     if backup_context:
@@ -265,7 +256,6 @@
             map(lambda v: v.data(), get_function_calls().values())
         )
         logger.info(f"正在获取回答,模型:{model_name}")
-        await message_reaction(Emoji("66"))
         # logger.info(f"上下文:{context_msg}")
         response = await handler.handle_common_chat(
             usermsg, model_name, tools_lists, config.marshoai_stream
@@ -292,23 +282,19 @@ with contextlib.suppress(ImportError):  # 优化先不做()
     async def poke(event: Event):
 
         user_nickname = await get_nickname_by_user_id(event.get_user_id())
-        usermsg = await get_prompt(model_name) + [
-            UserMessage(content=f"*{user_nickname}{config.marshoai_poke_suffix}"),
-        ]
         try:
             if config.marshoai_poke_suffix != "":
                 logger.info(f"收到戳一戳,用户昵称:{user_nickname}")
-
-                pre_response = await make_chat_openai(
+                response = await make_chat_openai(
                     client=client,
                     model_name=model_name,
-                    msg=usermsg,
-                    stream=config.marshoai_stream,
+                    msg=get_prompt(model_name)
+                    + [
+                        UserMessage(
+                            content=f"*{user_nickname}{config.marshoai_poke_suffix}"
+                        ),
+                    ],
                 )
-                if isinstance(pre_response, AsyncStream):
-                    response = await process_chat_stream(pre_response)
-                else:
-                    response = pre_response
                 choice = response.choices[0]  # type: ignore
                 if choice.finish_reason == CompletionsFinishReason.STOPPED:
                     content = extract_content_and_think(choice.message)[0]
@@ -7,7 +7,6 @@ import sys
 import traceback
 
 from nonebot import logger
-from typing_extensions import deprecated
 
 from .config import config
 
@@ -74,7 +73,6 @@ class MarshoContext:
         return self._get_target_dict(is_private).setdefault(target_id, [])
 
 
-@deprecated("小棉工具已弃用,无法正常调用")
 class MarshoTools:
     """
     Marsho 的工具类
@@ -2,7 +2,6 @@ import base64
 import json
 import mimetypes
 import re
-import ssl
 import uuid
 from typing import Any, Dict, List, Optional, Union
 
@@ -18,7 +17,7 @@ from nonebot_plugin_alconna import Text as TextMsg
 from nonebot_plugin_alconna import UniMessage
 from openai import AsyncOpenAI, AsyncStream, NotGiven
 from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
-from zhDateTime import DateTime  # type: ignore
+from zhDateTime import DateTime
 
 from ._types import DeveloperMessage
 from .cache.decos import *
@@ -59,8 +58,6 @@ _praises_init_data = {
 """
 初始夸赞名单之数据
 """
-_ssl_context = ssl.create_default_context()
-_ssl_context.set_ciphers("DEFAULT")
 
 
 async def get_image_raw_and_type(
@@ -77,7 +74,7 @@ async def get_image_raw_and_type(
         tuple[bytes, str]: 图片二进制数据, 图片MIME格式
     """
 
-    async with httpx.AsyncClient(verify=_ssl_context) as client:
+    async with httpx.AsyncClient() as client:
         response = await client.get(url, headers=_browser_headers, timeout=timeout)
         if response.status_code == 200:
             # 获取图片数据
@@ -101,7 +98,9 @@ async def get_image_b64(url: str, timeout: int = 10) -> Optional[str]:
     return: 图片base64编码
     """

-    if data_type := await get_image_raw_and_type(url, timeout):
+    if data_type := await get_image_raw_and_type(
+        url.replace("https://", "http://"), timeout
+    ):
         # image_format = content_type.split("/")[1] if content_type else "jpeg"
         base64_image = base64.b64encode(data_type[0]).decode("utf-8")
         data_url = "data:{};base64,{}".format(data_type[1], base64_image)
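For context, `get_image_b64` turns the raw bytes and MIME type returned by `get_image_raw_and_type` into a base64 `data:` URL. A standalone sketch of that encoding step, with a hypothetical helper name independent of the plugin code:

```python
# Sketch: turn raw image bytes plus a MIME type into a data: URL,
# the same shape get_image_b64 produces ("data:<mime>;base64,<payload>").
import base64


def to_data_url(raw: bytes, mime: str) -> str:
    payload = base64.b64encode(raw).decode("utf-8")
    return f"data:{mime};base64,{payload}"


print(to_data_url(b"\x89PNG", "image/png"))  # data:image/png;base64,iVBORw==
```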
@@ -137,15 +136,15 @@ async def make_chat_openai(
 
 
 @from_cache("praises")
-async def get_praises():
+def get_praises():
     praises_file = store.get_plugin_data_file(
         "praises.json"
     )  # 夸赞名单文件使用localstore存储
     if not praises_file.exists():
-        async with aiofiles.open(praises_file, "w", encoding="utf-8") as f:
-            await f.write(json.dumps(_praises_init_data, ensure_ascii=False, indent=4))
-    async with aiofiles.open(praises_file, "r", encoding="utf-8") as f:
-        data = json.loads(await f.read())
+        with open(praises_file, "w", encoding="utf-8") as f:
+            json.dump(_praises_init_data, f, ensure_ascii=False, indent=4)
+    with open(praises_file, "r", encoding="utf-8") as f:
+        data = json.load(f)
     praises_json = data
     return praises_json
 
@@ -161,8 +160,8 @@ async def refresh_praises_json():
     return data
 
 
-async def build_praises() -> str:
-    praises = await get_praises()
+def build_praises() -> str:
+    praises = get_praises()
     result = ["你喜欢以下几个人物,他们有各自的优点:"]
     for item in praises["like"]:
         result.append(f"名字:{item['name']},优点:{item['advantages']}")
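The `get_praises` change above swaps `aiofiles` for plain blocking file IO while keeping the same read-or-initialize flow: write the default data if `praises.json` is missing, then read it back. A minimal standalone sketch of that pattern, using generic paths and defaults rather than the plugin's localstore wiring:

```python
# Sketch of the read-or-initialize pattern used by get_praises,
# with synchronous file IO and a caller-supplied default payload.
import json
from pathlib import Path
from typing import Any


def load_or_init(path: Path, default: dict[str, Any]) -> dict[str, Any]:
    if not path.exists():
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, "w", encoding="utf-8") as f:
            json.dump(default, f, ensure_ascii=False, indent=4)
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)


data = load_or_init(Path("praises.json"), {"like": []})
print(data)
```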
@@ -214,8 +213,8 @@ async def set_nickname(user_id: str, name: str):
     data[user_id] = name
     if name == "" and user_id in data:
         del data[user_id]
-    async with aiofiles.open(filename, "w", encoding="utf-8") as f:
-        await f.write(json.dumps(data, ensure_ascii=False, indent=4))
+    with open(filename, "w", encoding="utf-8") as f:
+        json.dump(data, f, ensure_ascii=False, indent=4)
     return data
 
 
@@ -238,11 +237,11 @@ async def refresh_nickname_json():
         logger.error("刷新 nickname_json 表错误:无法载入 nickname.json 文件")
 
 
-async def get_prompt(model: str) -> List[Dict[str, Any]]:
+def get_prompt(model: str) -> List[Dict[str, Any]]:
     """获取系统提示词"""
     prompts = config.marshoai_additional_prompt
     if config.marshoai_enable_praises:
-        praises_prompt = await build_praises()
+        praises_prompt = build_praises()
         prompts += praises_prompt
 
     if config.marshoai_enable_time_prompt:
@@ -1,71 +0,0 @@
-from nonebot.log import logger
-from openai import AsyncStream
-from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
-from openai.types.chat.chat_completion import Choice
-
-
-async def process_chat_stream(
-    stream: AsyncStream[ChatCompletionChunk],
-) -> ChatCompletion:
-    reasoning_contents = ""
-    answer_contents = ""
-    last_chunk = None
-    is_first_token_appeared = False
-    is_answering = False
-    async for chunk in stream:
-        last_chunk = chunk
-        # print(chunk)
-        if not is_first_token_appeared:
-            logger.debug(f"{chunk.id}: 第一个 token 已出现")
-            is_first_token_appeared = True
-        if not chunk.choices:
-            logger.info("Usage:", chunk.usage)
-        else:
-            delta = chunk.choices[0].delta
-            if (
-                hasattr(delta, "reasoning_content")
-                and delta.reasoning_content is not None
-            ):
-                reasoning_contents += delta.reasoning_content
-            else:
-                if not is_answering:
-                    logger.debug(
-                        f"{chunk.id}: 思维链已输出完毕或无 reasoning_content 字段输出"
-                    )
-                    is_answering = True
-                if delta.content is not None:
-                    answer_contents += delta.content
-    # print(last_chunk)
-    # 创建新的 ChatCompletion 对象
-    if last_chunk and last_chunk.choices:
-        message = ChatCompletionMessage(
-            content=answer_contents,
-            role="assistant",
-            tool_calls=last_chunk.choices[0].delta.tool_calls,  # type: ignore
-        )
-        if reasoning_contents != "":
-            setattr(message, "reasoning_content", reasoning_contents)
-        choice = Choice(
-            finish_reason=last_chunk.choices[0].finish_reason,  # type: ignore
-            index=last_chunk.choices[0].index,
-            message=message,
-        )
-        return ChatCompletion(
-            id=last_chunk.id,
-            choices=[choice],
-            created=last_chunk.created,
-            model=last_chunk.model,
-            system_fingerprint=last_chunk.system_fingerprint,
-            object="chat.completion",
-            usage=last_chunk.usage,
-        )
-    else:
-        return ChatCompletion(
-            id="",
-            choices=[],
-            created=0,
-            model="",
-            system_fingerprint="",
-            object="chat.completion",
-            usage=None,
-        )
pdm.lock (generated, 8 changes)
@@ -5,7 +5,7 @@
 groups = ["default", "dev", "test"]
 strategy = ["inherit_metadata"]
 lock_version = "4.5.0"
-content_hash = "sha256:9dd3edfe69c332deac360af2685358e82c5dac0870900668534fc6f1d34040f8"
+content_hash = "sha256:d7ab3d9ca825de512d4f87ec846f7fddcf3d5796a7c9562e60c8c7d39c058817"
 
 [[metadata.targets]]
 requires_python = "~=3.10"
@@ -1485,7 +1485,7 @@ files = [
 
 [[package]]
 name = "nonebot-plugin-alconna"
-version = "0.57.0"
+version = "0.54.1"
 requires_python = ">=3.9"
 summary = "Alconna Adapter for Nonebot"
 groups = ["default"]
@@ -1499,8 +1499,8 @@ dependencies = [
     "tarina<0.7,>=0.6.8",
 ]
 files = [
-    {file = "nonebot_plugin_alconna-0.57.0-py3-none-any.whl", hash = "sha256:6c4bcce1a9aa176244b4c011b19b1cea00269c4c6794cd4e90d8dd7990ec3ec9"},
-    {file = "nonebot_plugin_alconna-0.57.0.tar.gz", hash = "sha256:7a9a4bf373f3f6836611dbde1a0917b84441a534dd6f2b20dae3ba6fff142858"},
+    {file = "nonebot_plugin_alconna-0.54.1-py3-none-any.whl", hash = "sha256:4edb4b081cd64ce37717c7a92d31aadd2cf287a5a0adc2ac86ed82d9bcad5048"},
+    {file = "nonebot_plugin_alconna-0.54.1.tar.gz", hash = "sha256:66fae03120b8eff25bb0027d65f149e399aa6f73c7585ebdd388d1904cecdeee"},
 ]
 
 [[package]]
@@ -10,7 +10,7 @@ authors = [
 ]
 dependencies = [
     "nonebot2>=2.4.0",
-    "nonebot-plugin-alconna>=0.57.0",
+    "nonebot-plugin-alconna>=0.48.0",
     "nonebot-plugin-localstore>=0.7.1",
     "zhDatetime>=2.0.0",
     "aiohttp>=3.9",
@@ -34,7 +34,7 @@ dependencies = [
 license = { text = "MIT, Mulan PSL v2" }
 
 [project.urls]
-Homepage = "https://marshoai-docs.pages.liteyuki.icu/"
+Homepage = "https://marshoai-docs.meli.liteyuki.icu/"
 
 
 [tool.nonebot]