# -*- coding: utf-8 -*-
"""AI chat plugin with vector-database-backed conversation memory.

Provides the `/chat` command: the user's message is answered by a DeepSeek
(OpenAI-compatible) model, prior exchanges are retrieved from / stored into
a per-user vector collection, and the reply is rendered as an image when
possible, falling back to plain text.
"""
import time
import uuid
import os
import base64

from neobot.core.managers.command_manager import matcher
from neobot.models.events.message import GroupMessageEvent, PrivateMessageEvent
from neobot.core.managers.vectordb_manager import vectordb_manager
from neobot.core.managers.image_manager import image_manager
from neobot.core.utils.logger import ModuleLogger
from neobot.core.config_loader import global_config

logger = ModuleLogger("AIChat")

__plugin_meta__ = {
    "name": "AI 聊天",
    "description": "支持向量数据库记忆功能的 AI 聊天助手",
    "usage": "/chat <内容> - 与 AI 进行对话"
}

# openai is an optional dependency; degrade gracefully when it is absent.
try:
    from openai import AsyncOpenAI
    OPENAI_AVAILABLE = True
except ImportError:
    OPENAI_AVAILABLE = False


async def get_ai_response(user_id: int, group_id: int, user_message: str) -> str:
    """Return the AI reply for *user_message*, using vector-DB memory.

    Retrieves up to 3 related past exchanges for this user, feeds them to the
    model as context, and persists the new exchange afterwards.

    Args:
        user_id: Sender's numeric id; keys the per-user memory collection.
        group_id: Group id (0 for private chats); stored as metadata only.
        user_message: The text the user sent.

    Returns:
        The model's reply, or a human-readable error string on failure.
    """
    if not OPENAI_AVAILABLE:
        return "请先安装 openai 库: pip install openai"

    # SECURITY: the API key must come from config or the environment —
    # never hard-code secrets in source (the previous revision did).
    api_key = (
        getattr(global_config.cross_platform, 'deepseek_api_key', None)
        or os.environ.get("DEEPSEEK_API_KEY", "")
    )
    api_url = getattr(global_config.cross_platform, 'deepseek_api_url',
                      "https://api.deepseek.com/v1")
    model = getattr(global_config.cross_platform, 'deepseek_model', "deepseek-chat")

    if not api_key or api_key == "your-api-key":
        return "请先在配置中设置 DeepSeek API Key"

    collection_name = f"chat_memory_{user_id}"

    # Memory retrieval is best-effort: a vector-DB failure must not block the chat.
    memory_context = ""
    try:
        results = vectordb_manager.query_texts(
            collection_name=collection_name,
            query_texts=[user_message],
            n_results=3
        )
        if results and results.get("documents") and results["documents"][0]:
            memory_context = "\n\n相关历史记忆:\n"
            for i, doc in enumerate(results["documents"][0], 1):
                memory_context += f"{i}. {doc}\n"
    except Exception as e:
        logger.error(f"检索聊天记忆失败: {e}")

    system_prompt = f"""你是一个友好的 AI 助手。请根据用户的输入进行回复。
如果提供了相关历史记忆,请参考这些记忆来保持对话的连贯性。{memory_context}"""

    try:
        client = AsyncOpenAI(
            api_key=api_key,
            # Config may contain a full /chat/completions URL; the SDK wants the base.
            base_url=api_url.replace("/chat/completions", "")
        )
        response = await client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_message}
            ],
            temperature=0.7,
            max_tokens=1000
        )
        ai_reply = response.choices[0].message.content

        # Persist the exchange for future retrieval; again best-effort.
        if ai_reply:
            try:
                doc_id = str(uuid.uuid4())
                text_to_embed = f"用户: {user_message}\nAI: {ai_reply}"
                metadata = {
                    "user_id": user_id,
                    "group_id": group_id,
                    "timestamp": int(time.time())
                }
                vectordb_manager.add_texts(
                    collection_name=collection_name,
                    texts=[text_to_embed],
                    metadatas=[metadata],
                    ids=[doc_id]
                )
            except Exception as e:
                logger.error(f"保存聊天记忆失败: {e}")

        return ai_reply
    except Exception as e:
        logger.error(f"AI 聊天请求失败: {e}")
        return f"请求失败: {str(e)}"


async def generate_chat_image_base64(user_name: str, user_message: str, ai_reply: str) -> str:
    """Render the chat exchange to a PNG and return it Base64-encoded.

    Args:
        user_name: Display name; its first character doubles as the avatar.
        user_message: The user's text, shown in the template.
        ai_reply: The AI's text, shown in the template.

    Returns:
        Base64-encoded PNG data from the image manager.
    """
    template_name = "ai_chat.html"
    user_avatar = user_name[0] if user_name else 'U'
    data = {
        "user_name": user_name,
        "user_message": user_message,
        "ai_reply": ai_reply,
        "user_avatar": user_avatar,
        "width": 800,
        "height": 600
    }
    output_name = f"chat_{int(time.time())}.png"
    image_base64 = await image_manager.render_template_to_base64(
        template_name=template_name,
        data=data,
        output_name=output_name,
        width=800,
        height=600
    )
    return image_base64


@matcher.command("chat")
async def chat_command(event: GroupMessageEvent | PrivateMessageEvent, args: list[str]):
    """AI 聊天命令

    Handle `/chat <content>`: fetch the AI reply, try to send it as a rendered
    image, and fall back to plain text if rendering fails.
    """
    if not args:
        await event.reply("请提供要聊天的内容,例如:/chat 你好")
        return

    user_message = " ".join(args)
    user_id = event.user_id
    group_id = getattr(event, 'group_id', 0)
    user_name = event.sender.nickname or event.sender.card or str(user_id)

    await event.reply("正在思考中...")
    reply = await get_ai_response(user_id, group_id, user_message)

    try:
        # BUGFIX: pass the resolved display name, not the raw user id —
        # the previous revision computed user_name and then ignored it.
        image_base64 = await generate_chat_image_base64(
            user_name=user_name,
            user_message=user_message,
            ai_reply=reply
        )
        if image_base64:
            from neobot.models.message import MessageSegment
            await event.reply(MessageSegment.image(image_base64))
        else:
            await event.reply(reply)
    except Exception as e:
        logger.error(f"生成聊天图片失败: {e}")
        # Image rendering is cosmetic; always deliver the text reply.
        await event.reply(reply)