diff --git a/README.md b/README.md index f2ca5db09..fb7c6be72 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # 简介 -> chatgpt-on-wechat(简称CoW)项目是基于大模型的智能对话机器人,支持微信公众号、企业微信应用、飞书、钉钉接入,可选择GPT3.5/GPT4.0/Claude/Gemini/LinkAI/ChatGLM/KIMI/文心一言/讯飞星火/通义千问/LinkAI,能处理文本、语音和图片,通过插件访问操作系统和互联网等外部资源,支持基于自有知识库定制企业AI应用。 +> chatgpt-on-wechat(简称CoW)项目是基于大模型的智能对话机器人,支持微信公众号、企业微信应用、飞书、钉钉接入,可选择GPT3.5/GPT4.0/Claude/Gemini/LinkAI/ChatGLM/KIMI/文心一言/讯飞星火/通义千问/LinkAI/Coze,能处理文本、语音和图片,通过插件访问操作系统和互联网等外部资源,支持基于自有知识库定制企业AI应用。 最新版本支持的功能如下: @@ -134,7 +134,9 @@ pip3 install -r requirements-optional.txt ```bash # config.json文件内容示例 { - "model": "gpt-3.5-turbo", # 模型名称, 支持 gpt-3.5-turbo, gpt-4, gpt-4-turbo, wenxin, xunfei, glm-4, claude-3-haiku, moonshot + "model": "gpt-3.5-turbo", # 模型名称, 支持 gpt-3.5-turbo, gpt-4, gpt-4-turbo, wenxin, xunfei, glm-4, claude-3-haiku, moonshot,coze + "coze_api_key": "", # 如果使用Coze模型则填入上面创建的 Coze API KEY + "coze_bot_id": "", # Coze 机器人ID "open_ai_api_key": "YOUR API KEY", # 如果使用openAI模型则填入上面创建的 OpenAI API KEY "proxy": "", # 代理客户端的ip和端口,国内环境开启代理的需要填写该项,如 "127.0.0.1:7890" "single_chat_prefix": ["bot", "@bot"], # 私聊时文本需要包含该前缀才能触发机器人回复 diff --git a/bot/bot_factory.py b/bot/bot_factory.py index a6ef2415b..d913a09f3 100644 --- a/bot/bot_factory.py +++ b/bot/bot_factory.py @@ -16,7 +16,11 @@ def create_bot(bot_type): # return BaiduUnitBot() from bot.baidu.baidu_wenxin import BaiduWenxinBot return BaiduWenxinBot() - + + if bot_type == const.COZE: + from bot.coze.coze import CozeBot + return CozeBot() + elif bot_type == const.CHATGPT: # ChatGPT 网页端web接口 from bot.chatgpt.chat_gpt_bot import ChatGPTBot diff --git a/bot/coze/coze.py b/bot/coze/coze.py new file mode 100644 index 000000000..b5b3c6c9e --- /dev/null +++ b/bot/coze/coze.py @@ -0,0 +1,97 @@ +# encoding:utf-8 + +import requests +import json +from common import const +from bot.bot import Bot +from bot.session_manager import SessionManager +from bridge.context import ContextType +from bridge.reply import Reply, 
# Read once at import time for backward compatibility; conf() must already be
# loaded when this module is imported (true for the project's startup order).
COZE_API_KEY = conf().get("coze_api_key")
COZE_BOT_ID = conf().get("coze_bot_id")


class CozeBot(Bot):
    """Bot implementation backed by the Coze (api.coze.cn) open_api v2 chat API.

    Conversation history is kept on the Coze side (fixed conversation_id), so
    each request only carries the latest user query; the local session is used
    for the #清除记忆 / #清除所有 commands and reply bookkeeping.
    """

    def __init__(self):
        super().__init__()
        self.sessions = SessionManager(CozeSession, model="coze")

    def reply(self, query, context=None):
        """Generate a Reply for an incoming message.

        :param query: user input text
        :param context: bridge Context carrying the message type and session_id
        :return: Reply, or None for unsupported context types
        """
        if not (context and context.type):
            return None
        if context.type == ContextType.TEXT:
            session_id = context["session_id"]
            if query == "#清除记忆":
                self.sessions.clear_session(session_id)
                return Reply(ReplyType.INFO, "记忆已清除")
            if query == "#清除所有":
                self.sessions.clear_all_session()
                return Reply(ReplyType.INFO, "所有人记忆已清除")
            session = self.sessions.session_query(query, session_id)
            # Pass the session object (not the bare query string) so the
            # error path can access session.session_id without crashing.
            result = self.reply_text(session)
            total_tokens = result["total_tokens"]
            reply_content = result["content"]
            logger.debug(
                "[COZE] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
                    session.messages, session_id, reply_content, result["completion_tokens"]
                )
            )
            # total_tokens == 0 is the error marker set by reply_text.
            if total_tokens == 0:
                return Reply(ReplyType.ERROR, reply_content)
            self.sessions.session_reply(reply_content, session_id, total_tokens)
            return Reply(ReplyType.TEXT, reply_content)
        elif context.type == ContextType.IMAGE_CREATE:
            # The Coze v2 chat API has no image-generation endpoint; the
            # previous code called a non-existent self.create_img() and
            # crashed with AttributeError. Fail explicitly instead.
            return Reply(ReplyType.ERROR, "Coze bot 暂不支持图片生成")
        return None

    def reply_text(self, session, retry_count=0):
        """Call the Coze chat API with the session's latest user query.

        :param session: CozeSession whose last message is the user query
        :param retry_count: internal retry counter (at most 2 retries; the
                            original computed need_retry and then discarded it)
        :return: dict with total_tokens / completion_tokens / content;
                 total_tokens == 0 signals an error to the caller
        """
        try:
            query = session.messages[-1]["content"]
            url = "https://api.coze.cn/open_api/v2/chat"
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + COZE_API_KEY,
            }
            payload = {
                "query": query,
                "conversation_id": "keep",
                "user": "keep",
                "bot_id": COZE_BOT_ID,
                "stream": False,
            }
            response = requests.post(url, headers=headers, data=json.dumps(payload), timeout=120)
            response_text = json.loads(response.text)
            # The v2 API returns several message types (answer / follow_up /
            # verbose); pick the answer instead of blindly indexing [1],
            # falling back to the old behavior if no typed answer is present.
            messages = response_text["messages"]
            answers = [m for m in messages if m.get("type") == "answer"]
            res_content = answers[0]["content"] if answers else messages[1]["content"]
            # Coze does not report token usage; 1 acts as a "success" marker
            # (0 is reserved for the error path above).
            return {
                "total_tokens": 1,
                "completion_tokens": 1,
                "content": res_content,
            }
        except Exception as e:
            if retry_count < 2:
                logger.warn("[COZE] Exception: {}, retry {}".format(e, retry_count + 1))
                return self.reply_text(session, retry_count + 1)
            logger.warn("[COZE] Exception: {}".format(e))
            self.sessions.clear_session(session.session_id)
            return {"total_tokens": 0, "completion_tokens": 0, "content": "出错了: {}".format(e)}
class CozeSession(Session):
    """Conversation session for the Coze bot.

    self.messages holds alternating user/assistant entries, e.g.:
        [
            {"role": "user", "content": "Who won the world series in 2020?"},
            {"role": "assistant", "content": "The Los Angeles Dodgers won ..."},
        ]
    """

    def __init__(self, session_id, system_prompt=None, model="gpt-3.5-turbo"):
        super().__init__(session_id, system_prompt)
        self.model = model

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Drop the oldest user/assistant pairs until the (estimated) token
        count fits within max_tokens; returns the resulting count.

        Falls back to the caller-supplied cur_tokens with a rough decrement
        when precise counting raises.
        """
        exact = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as err:
            exact = False
            if cur_tokens is None:
                raise err
            logger.debug("Exception when counting tokens precisely for query: {}".format(err))
        while cur_tokens > max_tokens:
            if len(self.messages) < 2:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
                break
            # discard the oldest (user, assistant) pair together
            del self.messages[0]
            del self.messages[0]
            cur_tokens = self.calc_tokens() if exact else cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        """Estimated token count of the whole message history."""
        return num_tokens_from_messages(self.messages, self.model)


def num_tokens_from_messages(messages, model):
    """Return a rough token estimate for a list of messages.

    Coze's official accounting is unclear (roughly "CJK characters +
    other-language words x 1.3"), so a plain character count is used; it only
    needs to be good enough for deciding when to prune history.
    """
    return sum(len(msg["content"]) for msg in messages)
68d3795cd..f0351359f 100644 --- a/common/const.py +++ b/common/const.py @@ -2,6 +2,7 @@ OPEN_AI = "openAI" CHATGPT = "chatGPT" BAIDU = "baidu" # 百度文心一言模型 +COZE = "coze" # 头条 coze XUNFEI = "xunfei" CHATGPTONAZURE = "chatGPTOnAzure" LINKAI = "linkai" diff --git a/config.py b/config.py index b6ae49e1c..f8b2df92f 100644 --- a/config.py +++ b/config.py @@ -11,6 +11,9 @@ # 将所有可用的配置项写在字典里, 请使用小写字母 # 此处的配置值无实际意义,程序不会读取此处的配置,仅用于提示格式,请将配置加入到config.json中 available_setting = { + # coze 配置 + "coze_api_key": "", # coze个人令牌 + "coze_bot_id": "", # coze bot id 在bot设置页面的 url 最后一串数字 # openai api配置 "open_ai_api_key": "", # openai api key # openai apibase,当use_azure_chatgpt为true时,需要设置对应的api base diff --git a/plugins/role/role.py b/plugins/role/role.py index 7c7b1067b..7029d802c 100644 --- a/plugins/role/role.py +++ b/plugins/role/role.py @@ -99,7 +99,7 @@ def on_handle_context(self, e_context: EventContext): if e_context["context"].type != ContextType.TEXT: return btype = Bridge().get_bot_type("chat") - if btype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.QWEN_DASHSCOPE, const.XUNFEI, const.BAIDU, const.ZHIPU_AI, const.MOONSHOT, const.MiniMax, const.LINKAI]: + if btype not in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.QWEN_DASHSCOPE, const.XUNFEI, const.BAIDU, const.ZHIPU_AI, const.MOONSHOT, const.MiniMax, const.LINKAI, const.COZE]: logger.debug(f'不支持的bot: {btype}') return bot = Bridge().get_bot("chat")