Compare commits


11 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
|  | 02d465112f | 📝 Add PR preview | 2025-03-11 00:03:54 +08:00 |
|  | d95928cab7 | Merge branch 'main' of https://github.com/LiteyukiStudio/nonebot-plugin-marshoai | 2025-03-10 23:57:16 +08:00 |
|  | 41cb287a84 | Fix the thinking chain not being included in the response structure for streaming requests | 2025-03-10 23:56:13 +08:00 |
|  | a0f2b52e59 | 📝 Update the GitHub Actions workflow to trigger on pushes and pull requests | 2025-03-10 23:38:42 +08:00 |
|  | 75d173bed7 | Update reference links | 2025-03-10 23:24:19 +08:00 |
|  | f39f5cc1be | Merge pull request #20 from LiteyukiStudio/snowykami-patch-1 (📝 Update the Pages deployment URL) | 2025-03-10 23:13:32 +08:00 |
|  | 70fd176904 | 📝 Update the Pages deployment URL | 2025-03-10 23:08:57 +08:00 |
|  | 57ea4fc10b | 📝 Introduce a mysterious little JS | 2025-03-08 23:31:59 +08:00 |
|  | a1ddf40610 | Merge branch 'main' of https://github.com/LiteyukiStudio/nonebot-plugin-marshoai | 2025-03-07 21:34:22 +08:00 |
|  | dc294a257d | 📝 Disable the clean URLs setting | 2025-03-07 21:34:19 +08:00 |
| Akarin~ | 6f085b36c6 | Streaming invocation [WIP] (#19): "streaming 30%", "streaming 90%" | 2025-03-07 19:04:51 +08:00 |
9 changed files with 120 additions and 53 deletions

View File

@@ -1,9 +1,6 @@
 name: Deploy VitePress site to Liteyuki PaaS
-on:
-  push:
-    branches: [main]
-  workflow_dispatch:
+on: ["push", "pull_request_target"]
 permissions:
   contents: write
@@ -28,7 +25,7 @@ jobs:
       - name: Setup Python
         uses: actions/setup-python@v2
         with:
-          python-version: '3.11'
+          python-version: "3.11"
       - name: Setup API markdown
         run: |-
@@ -52,11 +49,10 @@ jobs:
         run: |-
           pnpm run docs:build
       - name: "发布"
         run: |
           npx -p "@getmeli/cli" meli upload docs/.vitepress/dist \
-            --url "https://meli.liteyuki.icu" \
+            --url "https://pages.liteyuki.icu" \
             --site "$MELI_SITE" \
             --token "$MELI_TOKEN" \
             --release "$GITHUB_SHA"

View File

@@ -8,12 +8,13 @@ import { generateSidebar } from 'vitepress-sidebar'
 // https://vitepress.dev/reference/site-config
 export default defineConfig({
   head: [
+    ["script", { src: "https://cdn.liteyuki.icu/js/liteyuki_footer.js" }],
     ['link', { rel: 'icon', type: 'image/x-icon', href: '/favicon.ico' }],
   ],
   rewrites: {
     [`${defaultLang}/:rest*`]: ":rest*",
   },
-  cleanUrls: true,
+  cleanUrls: false,
   themeConfig: {
     // https://vitepress.dev/reference/default-theme-config
     logo: {

View File

@@ -147,4 +147,5 @@ Add options in the `.env` file from the diagram below in nonebot2 project.
 | MARSHOAI_ENABLE_RICHTEXT_PARSE | `bool` | `true` | Turn on auto parse rich text feature(including image, LaTeX equation) |
 | MARSHOAI_SINGLE_LATEX_PARSE | `bool` | `false` | Render single-line equation or not |
 | MARSHOAI_FIX_TOOLCALLS | `bool` | `true` | Fix tool calls or not |
-| MARSHOAI_SEND_THINKING | `bool` | `true` | Send thinking chain or not |
+| MARSHOAI_SEND_THINKING | `bool` | `true` | Send thinking chain or not |
+| MARSHOAI_STREAM | `bool` | `false` | Request the API in streaming mode. **While enabled, function calling is unavailable for now, and no streaming effect is visible on the bot user's chat UI** |
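A minimal `.env` sketch for turning the new option on in a nonebot2 project (the values shown are illustrative choices, not defaults):

```dotenv
# Request the API in streaming mode (function calling is unavailable while enabled)
MARSHOAI_STREAM=true
# Keep forwarding the model's thinking chain (default is already true)
MARSHOAI_SEND_THINKING=true
```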

View File

@@ -149,6 +149,8 @@ The GitHub Models API is heavily restricted and not recommended; prefer modifying `MARSHOA
 | MARSHOAI_SINGLE_LATEX_PARSE | `bool` | `false` | Whether to render single-line formulas (available when rich-text parsing is enabled) (if single-line ones get rendered too... it just doesn't look great) |
 | MARSHOAI_FIX_TOOLCALLS | `bool` | `true` | Whether to fix tool calls (must be disabled for some models, and when using models deployed with vLLM) |
 | MARSHOAI_SEND_THINKING | `bool` | `true` | Whether to send the thinking chain (not supported by some models) |
+| MARSHOAI_STREAM | `bool` | `false` | Whether to request the API in streaming mode. **While enabled, function calling is unavailable for now, and no streaming effect is visible on the bot user's chat UI** |
+
 #### Development and debugging options

View File

@@ -32,6 +32,7 @@ class ConfigModel(BaseModel):
     marshoai_enable_sysasuser_prompt: bool = False
     marshoai_additional_prompt: str = ""
     marshoai_poke_suffix: str = "揉了揉你的猫耳"
+    marshoai_stream: bool = False
     marshoai_enable_richtext_parse: bool = True
     """
     Whether to enable automatic rich-text parsing of messages, i.e. send the image when a message contains an image link, and send a rendered formula image when it contains a LaTeX formula.

View File

@@ -18,8 +18,9 @@ from nonebot.matcher import (
     current_matcher,
 )
 from nonebot_plugin_alconna.uniseg import UniMessage, UniMsg
-from openai import AsyncOpenAI
-from openai.types.chat import ChatCompletion, ChatCompletionMessage
+from openai import AsyncOpenAI, AsyncStream
+from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
+from openai.types.chat.chat_completion import Choice
 from .config import config
 from .constants import SUPPORT_IMAGE_MODELS
@@ -94,9 +95,10 @@ class MarshoHandler:
         self,
         user_message: Union[str, list],
         model_name: str,
-        tools_list: list,
+        tools_list: list | None,
         tool_message: Optional[list] = None,
-    ) -> ChatCompletion:
+        stream: bool = False,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
         """
         Handle a single chat request
         """
@@ -109,12 +111,13 @@
             msg=context_msg + [UserMessage(content=user_message).as_dict()] + (tool_message if tool_message else []),  # type: ignore
             model_name=model_name,
             tools=tools_list if tools_list else None,
+            stream=stream,
         )
         return response
 
     async def handle_function_call(
         self,
-        completion: ChatCompletion,
+        completion: Union[ChatCompletion, AsyncStream[ChatCompletionChunk]],
         user_message: Union[str, list],
         model_name: str,
         tools_list: list,
@@ -122,7 +125,10 @@
         # function call
         # Extra information is needed, so invoke the function tools
         tool_msg = []
-        choice = completion.choices[0]
+        if isinstance(completion, ChatCompletion):
+            choice = completion.choices[0]
+        else:
+            raise ValueError("Unexpected completion type")
         # await UniMessage(str(response)).send()
         tool_calls = choice.message.tool_calls
         # try:
@@ -191,14 +197,20 @@
         """
         global target_list
         if stream:
-            raise NotImplementedError
-        response = await self.handle_single_chat(
-            user_message=user_message,
-            model_name=model_name,
-            tools_list=tools_list,
-            tool_message=tool_message,
-        )
-        choice = response.choices[0]
+            response = await self.handle_stream_request(
+                user_message=user_message,
+                model_name=model_name,
+                tools_list=tools_list,
+                tools_message=tool_message,
+            )
+        else:
+            response = await self.handle_single_chat(  # type: ignore
+                user_message=user_message,
+                model_name=model_name,
+                tools_list=tools_list,
+                tool_message=tool_message,
+            )
+        choice = response.choices[0]  # type: ignore
         # Sprint(choice)
         # When tool_calls is non-empty, set finish_reason to TOOL_CALLS
         if choice.message.tool_calls is not None and config.marshoai_fix_toolcalls:
@@ -240,3 +252,76 @@
         else:
             await UniMessage(f"意外的完成原因:{choice.finish_reason}").send()
             return None
+
+    async def handle_stream_request(
+        self,
+        user_message: Union[str, list],
+        model_name: str,
+        tools_list: list,
+        tools_message: Optional[list] = None,
+    ) -> Union[ChatCompletion, None]:
+        """
+        Handle a streaming request
+        """
+        response = await self.handle_single_chat(
+            user_message=user_message,
+            model_name=model_name,
+            tools_list=None,  # TODO: support tool calls in streaming mode
+            tool_message=tools_message,
+            stream=True,
+        )
+        if isinstance(response, AsyncStream):
+            reasoning_contents = ""
+            answer_contents = ""
+            last_chunk = None
+            is_first_token_appeared = False
+            is_answering = False
+            async for chunk in response:
+                last_chunk = chunk
+                # print(chunk)
+                if not is_first_token_appeared:
+                    logger.debug(f"{chunk.id}: 第一个 token 已出现")
+                    is_first_token_appeared = True
+                if not chunk.choices:
+                    logger.info("Usage:", chunk.usage)
+                else:
+                    delta = chunk.choices[0].delta
+                    if (
+                        hasattr(delta, "reasoning_content")
+                        and delta.reasoning_content is not None
+                    ):
+                        reasoning_contents += delta.reasoning_content
+                    else:
+                        if not is_answering:
+                            logger.debug(
+                                f"{chunk.id}: 思维链已输出完毕或无 reasoning_content 字段输出"
+                            )
+                            is_answering = True
+                        if delta.content is not None:
+                            answer_contents += delta.content
+            # print(last_chunk)
+            # Build a new ChatCompletion object from the accumulated deltas
+            if last_chunk and last_chunk.choices:
+                message = ChatCompletionMessage(
+                    content=answer_contents,
+                    role="assistant",
+                    tool_calls=last_chunk.choices[0].delta.tool_calls,  # type: ignore
+                )
+                if reasoning_contents != "":
+                    setattr(message, "reasoning_content", reasoning_contents)
+                choice = Choice(
+                    finish_reason=last_chunk.choices[0].finish_reason,  # type: ignore
+                    index=last_chunk.choices[0].index,
+                    message=message,
+                )
+                return ChatCompletion(
+                    id=last_chunk.id,
+                    choices=[choice],
+                    created=last_chunk.created,
+                    model=last_chunk.model,
+                    system_fingerprint=last_chunk.system_fingerprint,
+                    object="chat.completion",
+                    usage=last_chunk.usage,
+                )
+        return None
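
In effect, the stream is collapsed back into a regular `ChatCompletion`, so downstream code can keep reading `choices[0].message` unchanged. A minimal usage sketch of the method above (the `handler` instance and model name are hypothetical):

```python
# Hypothetical call site: handle_stream_request returns either a
# reassembled ChatCompletion or None (e.g. if the stream yielded no chunks).
completion = await handler.handle_stream_request(
    user_message="Hello",
    model_name="deepseek-r1",  # placeholder model name
    tools_list=[],  # ignored for now; streaming does not support tool calls yet
)
if completion is not None:
    message = completion.choices[0].message
    print(message.content)  # the accumulated answer text
    # reasoning_content is attached via setattr only when the model streamed one
    print(getattr(message, "reasoning_content", None))
```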

View File

@@ -257,7 +257,9 @@ async def marsho(
     )
     logger.info(f"正在获取回答,模型:{model_name}")
     # logger.info(f"上下文:{context_msg}")
-    response = await handler.handle_common_chat(usermsg, model_name, tools_lists)
+    response = await handler.handle_common_chat(
+        usermsg, model_name, tools_lists, config.marshoai_stream
+    )
     # await UniMessage(str(response)).send()
     if response is not None:
         context_user, context_assistant = response
@@ -293,7 +295,7 @@ with contextlib.suppress(ImportError):  # skipping the optimization for now ()
             ),
         ],
     )
-    choice = response.choices[0]
+    choice = response.choices[0]  # type: ignore
     if choice.finish_reason == CompletionsFinishReason.STOPPED:
         content = extract_content_and_think(choice.message)[0]
         await UniMessage(" " + str(content)).send(at_sender=True)

View File

@@ -5,7 +5,7 @@ from .constants import USAGE
 
 metadata = PluginMetadata(
     name="Marsho AI 插件",
-    description="接入 Azure API 或其他 API 的 AI 聊天插件,支持图片处理,外部函数调用,兼容包括 DeepSeek-R1 在内的多个模型",
+    description="接入 Azure API 或其他 API 的 AI 聊天插件,支持图片处理,外部函数调用,兼容包括 DeepSeek-R1 QwQ-32B 在内的多个模型",
     usage=USAGE,
     type="application",
     config=ConfigModel,

View File

@@ -3,7 +3,7 @@ import json
 import mimetypes
 import re
 import uuid
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Union
 
 import aiofiles  # type: ignore
 import httpx
@@ -15,8 +15,8 @@ from nonebot.log import logger
 from nonebot_plugin_alconna import Image as ImageMsg
 from nonebot_plugin_alconna import Text as TextMsg
 from nonebot_plugin_alconna import UniMessage
-from openai import AsyncOpenAI, NotGiven
-from openai.types.chat import ChatCompletion, ChatCompletionMessage
+from openai import AsyncOpenAI, AsyncStream, NotGiven
+from openai.types.chat import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
 from zhDateTime import DateTime
 
 from ._types import DeveloperMessage
@@ -109,35 +109,13 @@ async def get_image_b64(url: str, timeout: int = 10) -> Optional[str]:
     return None
 
 
-async def make_chat(
-    client: ChatCompletionsClient,
-    msg: list,
-    model_name: str,
-    tools: Optional[list] = None,
-):
-    """
-    Call the AI to get a reply
-
-    Args:
-        client: the client used to communicate with the AI model
-        msg: the message content
-        model_name: the AI model to use
-        tools: the tool list
-    """
-    return await client.complete(
-        messages=msg,
-        model=model_name,
-        tools=tools,
-        **config.marshoai_model_args,
-    )
-
-
 async def make_chat_openai(
     client: AsyncOpenAI,
     msg: list,
     model_name: str,
     tools: Optional[list] = None,
-) -> ChatCompletion:
+    stream: bool = False,
+) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
     """
     Call the AI via the OpenAI SDK to get a reply
@@ -152,6 +130,7 @@ async def make_chat_openai(
         model=model_name,
         tools=tools or NOT_GIVEN,
         timeout=config.marshoai_timeout,
+        stream=stream,
         **config.marshoai_model_args,
     )
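
For context, passing `stream=True` makes the OpenAI SDK return an `AsyncStream[ChatCompletionChunk]` instead of a finished `ChatCompletion`, which is what `handle_stream_request` consumes above. A minimal sketch of that pattern (the endpoint, key, and model name are placeholders, not project values):

```python
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    # Placeholder endpoint and credentials, for illustration only
    client = AsyncOpenAI(base_url="https://example.com/v1", api_key="sk-placeholder")
    stream = await client.chat.completions.create(
        model="deepseek-r1",  # placeholder model name
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    )
    async for chunk in stream:  # each item is a ChatCompletionChunk
        if chunk.choices and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)


asyncio.run(main())
```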