Skip to content

Commit

Permalink
对接 QAnything
Browse files Browse the repository at this point in the history
  • Loading branch information
Ikaros-521 committed Feb 19, 2024
1 parent 508c393 commit 5bd75ee
Show file tree
Hide file tree
Showing 8 changed files with 299 additions and 48 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
<a href="//github.com/Ikaros-521/AI-Vtuber/network"><img alt="GitHub forks" src="https://img.shields.io/github/forks/Ikaros-521/AI-Vtuber?color=%2300BFFF&style=flat-square"></a>
<a href="//www.python.org"><img src="https://img.shields.io/badge/python-3.10+-blue.svg" alt="python"></a>

`Luna AI` 是一款结合了最先进技术的虚拟AI主播。它的核心是一系列高效的人工智能模型,包括 `ChatterBot、GPT、Claude、langchain、chatglm、text-generation-webui、讯飞星火、智谱AI、谷歌Bard、文心一言、通义星尘、千帆大模型、GeminiKimi Chat`。这些模型既可以在本地运行,也可以通过云端服务提供支持。
`Luna AI` 是一款结合了最先进技术的虚拟AI主播。它的核心是一系列高效的人工智能模型,包括 `ChatterBot、GPT、Claude、langchain、chatglm、text-generation-webui、讯飞星火、智谱AI、谷歌Bard、文心一言、通义星尘、千帆大模型、Gemini、Kimi Chat 和 QAnything`。这些模型既可以在本地运行,也可以通过云端服务提供支持。

`Luna AI` 的外观由 `Live2D、Vtube Studio、xuniren 和 UE5 结合 Audio2Face` 技术打造,为用户提供了一个生动、互动的虚拟形象。这使得 `Luna AI` 能够在各大直播平台,如 `Bilibili、抖音、快手、微信视频号、斗鱼、YouTube、Twitch 和 TikTok`,进行实时互动直播。当然,它也可以在本地环境中与您进行个性化对话。

Expand Down
10 changes: 9 additions & 1 deletion config.json
Original file line number Diff line number Diff line change
Expand Up @@ -350,6 +350,13 @@
"history_enable": true,
"history_max_len": 4096
},
"qanything": {
"api_ip_port": "http://127.0.0.1:8777",
"user_id": "zzp",
"kb_ids": ["KB2435554f1fb348ad84a1eb60eaa1c466"],
"history_enable": true,
"history_max_len": 300
},
"local_qa": {
"text": {
"enable": true,
Expand Down Expand Up @@ -1250,7 +1257,8 @@
"tongyi": true,
"tongyixingchen": true,
"my_wenxinworkshop": true,
"gemini": true
"gemini": true,
"qanything": true
},
"tts": {
"edge-tts": true,
Expand Down
10 changes: 9 additions & 1 deletion config.json.bak
Original file line number Diff line number Diff line change
Expand Up @@ -350,6 +350,13 @@
"history_enable": true,
"history_max_len": 4096
},
"qanything": {
"api_ip_port": "http://127.0.0.1:8777",
"user_id": "zzp",
"kb_ids": ["KB2435554f1fb348ad84a1eb60eaa1c466"],
"history_enable": true,
"history_max_len": 300
},
"local_qa": {
"text": {
"enable": true,
Expand Down Expand Up @@ -1250,7 +1257,8 @@
"tongyi": true,
"tongyixingchen": true,
"my_wenxinworkshop": true,
"gemini": true
"gemini": true,
"qanything": true
},
"tts": {
"edge-tts": true,
Expand Down
111 changes: 111 additions & 0 deletions tests/test_qanything/api.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,111 @@
import json, logging
import requests
from urllib.parse import urljoin

# from utils.common import Common
# from utils.logger import Configure_logger


class QAnything:
    """Minimal HTTP client for a locally deployed QAnything knowledge-base QA service.

    Wraps the ``list_knowledge_base`` and ``local_doc_chat`` endpoints and keeps
    a bounded in-memory conversation history between calls.
    """

    def __init__(self, data):
        """Store the service configuration.

        Args:
            data (dict): expects keys ``api_ip_port``, ``user_id``, ``kb_ids``,
                ``history_enable`` and ``history_max_len``.
        """
        # Base URL of the QAnything service, e.g. "http://127.0.0.1:8777".
        self.api_ip_port = data["api_ip_port"]
        self.config_data = data

        # Conversation history in the shape the service returns it
        # (presumably a list of [question, answer] pairs — TODO confirm).
        self.history = []

    # Fetch the knowledge-base list for the configured user.
    def get_list_knowledge_base(self):
        """Return the service's knowledge-base list (its ``data`` field), or None on error."""
        url = urljoin(self.api_ip_port, "/api/local_doc_qa/list_knowledge_base")
        try:
            response = requests.post(url, json={"user_id": self.config_data["user_id"]})
            response.raise_for_status()  # raise on HTTP error status

            ret = response.json()

            logging.debug(ret)
            logging.info(f"本地知识库列表:{ret['data']}")

            return ret['data']
        except Exception as e:
            logging.error(e)
            return None

    def get_resp(self, data):
        """Ask the knowledge base a question and return the text answer.

        Args:
            data (dict): must contain ``prompt`` — the question text.

        Returns:
            str | None: the service's answer, or None on any failure.
        """
        try:
            # Use urljoin like get_list_knowledge_base so a trailing slash in
            # api_ip_port cannot produce a double-slash URL.
            url = urljoin(self.api_ip_port, "/api/local_doc_qa/local_doc_chat")

            data_json = {
                "user_id": self.config_data["user_id"],
                "kb_ids": self.config_data["kb_ids"],
                "question": data["prompt"],
                "history": self.history
            }

            response = requests.post(url=url, json=data_json)
            response.raise_for_status()  # raise on HTTP error status

            ret = response.json()

            logging.info(ret)

            resp_content = ret["response"]

            # Remember the conversation when history is enabled.
            if self.config_data["history_enable"]:
                self.history = ret["history"]

            # Trim oldest entries until the total character count fits within
            # history_max_len. An empty history totals 0 characters, so the
            # loop always terminates.
            while True:
                total_chars = sum(len(item) for sublist in self.history for item in sublist)
                if total_chars > self.config_data["history_max_len"]:
                    self.history.pop(0)
                else:
                    break

            return resp_content
        except Exception as e:
            logging.error(e)
            return None


if __name__ == '__main__':
    # Configure log output for this ad-hoc smoke test.
    logging.basicConfig(
        level=logging.DEBUG,  # adjust verbosity here if needed
        format="%(asctime)s [%(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    config = {
        "api_ip_port": "http://127.0.0.1:8777",
        "user_id": "zzp",
        "kb_ids": ["KB2435554f1fb348ad84a1eb60eaa1c466"],
        "history_enable": True,
        "history_max_len": 300
    }
    client = QAnything(config)

    client.get_list_knowledge_base()
    for question in ("伊卡洛斯和妮姆芙的关系", "伊卡洛斯的英文名"):
        logging.info(client.get_resp({"prompt": question}))

4 changes: 3 additions & 1 deletion utils/gpt_model/gpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
from utils.gpt_model.my_qianfan import My_QianFan
from utils.gpt_model.my_wenxinworkshop import My_WenXinWorkShop
from utils.gpt_model.gemini import Gemini
from utils.gpt_model.qanything import QAnything

class GPT_Model:
openai = None
Expand All @@ -47,7 +48,8 @@ def set_model_config(self, model_name, config):
"tongyixingchen": TongYiXingChen,
"my_wenxinworkshop": My_WenXinWorkShop,
"my_qianfan": My_QianFan,
"gemini": Gemini
"gemini": Gemini,
"qanything": QAnything
}

if model_name == "openai":
Expand Down
113 changes: 113 additions & 0 deletions utils/gpt_model/qanything.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
import json, logging
import requests
from urllib.parse import urljoin

from utils.common import Common
from utils.logger import Configure_logger


class QAnything:
    """HTTP client for a locally deployed QAnything knowledge-base QA service.

    Wraps the ``list_knowledge_base`` and ``local_doc_chat`` endpoints and keeps
    a bounded in-memory conversation history between calls.
    """

    def __init__(self, data):
        """Set up logging, store the configuration and fetch the KB list once.

        Args:
            data (dict): expects keys ``api_ip_port``, ``user_id``, ``kb_ids``,
                ``history_enable`` and ``history_max_len``.
        """
        self.common = Common()
        # Log file path (one file per Beijing-time day).
        file_path = "./log/log-" + self.common.get_bj_time(1) + ".txt"
        Configure_logger(file_path)

        # Base URL of the QAnything service, e.g. "http://127.0.0.1:8777".
        self.api_ip_port = data["api_ip_port"]
        self.config_data = data

        # Conversation history in the shape the service returns it
        # (presumably a list of [question, answer] pairs — TODO confirm).
        self.history = []

        # Eagerly list the knowledge bases so connectivity problems surface at startup.
        self.get_list_knowledge_base()

    # Fetch the knowledge-base list for the configured user.
    def get_list_knowledge_base(self):
        """Return the service's knowledge-base list (its ``data`` field), or None on error."""
        url = urljoin(self.api_ip_port, "/api/local_doc_qa/list_knowledge_base")
        try:
            response = requests.post(url, json={"user_id": self.config_data["user_id"]})
            response.raise_for_status()  # raise on HTTP error status

            ret = response.json()

            logging.debug(ret)
            logging.info(f"本地知识库列表:{ret['data']}")

            return ret['data']
        except Exception as e:
            logging.error(e)
            return None

    def get_resp(self, data):
        """Ask the knowledge base a question and return the text answer.

        Args:
            data (dict): must contain ``prompt`` — the question text.

        Returns:
            str | None: the service's answer, or None on any failure.
        """
        try:
            # Use urljoin like get_list_knowledge_base so a trailing slash in
            # api_ip_port cannot produce a double-slash URL.
            url = urljoin(self.api_ip_port, "/api/local_doc_qa/local_doc_chat")

            data_json = {
                "user_id": self.config_data["user_id"],
                "kb_ids": self.config_data["kb_ids"],
                "question": data["prompt"],
                "history": self.history
            }

            response = requests.post(url=url, json=data_json)
            response.raise_for_status()  # raise on HTTP error status

            ret = response.json()

            logging.debug(ret)

            resp_content = ret["response"]

            # Remember the conversation when history is enabled.
            if self.config_data["history_enable"]:
                self.history = ret["history"]

            # Trim oldest entries until the total character count fits within
            # history_max_len. An empty history totals 0 characters, so the
            # loop always terminates.
            while True:
                total_chars = sum(len(item) for sublist in self.history for item in sublist)
                if total_chars > self.config_data["history_max_len"]:
                    self.history.pop(0)
                else:
                    break

            return resp_content
        except Exception as e:
            logging.error(e)
            return None


if __name__ == '__main__':
    # Configure log output for this ad-hoc smoke test.
    logging.basicConfig(
        level=logging.DEBUG,  # adjust verbosity here if needed
        format="%(asctime)s [%(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    config = {
        "api_ip_port": "http://127.0.0.1:8777",
        "user_id": "zzp",
        "kb_ids": ["KB2435554f1fb348ad84a1eb60eaa1c466"],
        "history_enable": True,
        "history_max_len": 300
    }
    client = QAnything(config)

    client.get_list_knowledge_base()
    for question in ("伊卡洛斯和妮姆芙的关系", "伊卡洛斯的英文名"):
        logging.info(client.get_resp({"prompt": question}))

8 changes: 5 additions & 3 deletions utils/my_handle.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,7 @@ def __init__(self, config_path):
self.my_qianfan = None
self.my_wenxinworkshop = None
self.gemini = None
self.qanything = None

# 配置加载
self.config_load()
Expand Down Expand Up @@ -849,7 +850,7 @@ def sd_handle(self, data):
# 新增LLM需要在这里追加
if chat_type in ["chatgpt", "claude", "claude2", "chatglm", "chat_with_file", "text_generation_webui", \
"sparkdesk", "langchain_chatglm", "langchain_chatchat", "zhipu", "bard", "yiyan", "tongyi", \
"tongyixingchen", "my_qianfan", "my_wenxinworkshop", "gemini"]:
"tongyixingchen", "my_qianfan", "my_wenxinworkshop", "gemini", "qanything"]:
content = My_handle.config.get("sd", "prompt_llm", "before_prompt") + \
content + My_handle.config.get("after_prompt")

Expand Down Expand Up @@ -1037,7 +1038,7 @@ def tuning_handle(self, data_json):
# 新增LLM需要在这里追加
if chat_type in ["chatgpt", "claude", "claude2", "chatglm", "alice", "chat_with_file", "text_generation_webui", \
"sparkdesk", "langchain_chatglm", "langchain_chatchat", "zhipu", "bard", "yiyan", "tongyi", \
"tongyixingchen", "my_qianfan", "my_wenxinworkshop", "gemini"]:
"tongyixingchen", "my_qianfan", "my_wenxinworkshop", "gemini", "qanything"]:
resp_content = self.llm_handle(chat_type, data_json)
if resp_content is not None:
logging.info(f"[AI回复{My_handle.config.get('talk', 'username')}]:{resp_content}")
Expand Down Expand Up @@ -1108,6 +1109,7 @@ def llm_handle(self, chat_type, data):
"my_qianfan": lambda: self.my_qianfan.get_resp(data["content"]),
"my_wenxinworkshop": lambda: self.my_wenxinworkshop.get_resp(data["content"]),
"gemini": lambda: self.gemini.get_resp(data["content"]),
"qanything": lambda: self.qanything.get_resp({"prompt": data["content"]}),
"reread": lambda: data["content"]
}

Expand Down Expand Up @@ -1794,7 +1796,7 @@ def comment_handle(self, data):
# 新增LLM需要在这里追加
if chat_type in ["chatgpt", "claude", "claude2", "chatglm", "alice", "chat_with_file", "text_generation_webui", \
"sparkdesk", "langchain_chatglm", "langchain_chatchat", "zhipu", "bard", "yiyan", "tongyi", \
"tongyixingchen", "my_qianfan", "my_wenxinworkshop", "gemini"]:
"tongyixingchen", "my_qianfan", "my_wenxinworkshop", "gemini", "qanything"]:
data_json["content"] = My_handle.config.get("before_prompt") + content + My_handle.config.get("after_prompt")
resp_content = self.llm_handle(chat_type, data_json)
if resp_content is not None:
Expand Down
Loading

0 comments on commit 5bd75ee

Please sign in to comment.