Skip to content

Commit

Permalink
Merge pull request Ikaros-521#770 from Ikaros-521/owner
Browse files Browse the repository at this point in the history
LLM新增gpt4free,具体哪些供应商好用得自行测试
  • Loading branch information
Ikaros-521 authored Apr 19, 2024
2 parents e4bd526 + aee1bef commit af5389e
Show file tree
Hide file tree
Showing 8 changed files with 344 additions and 10 deletions.
17 changes: 14 additions & 3 deletions config.json
Original file line number Diff line number Diff line change
Expand Up @@ -216,6 +216,16 @@
"frequency_penalty": 0.0,
"preset": "请扮演一个AI虚拟主播。不要回答任何敏感问题!不要强调你是主播,只需要回答问题!"
},
"gpt4free": {
"provider": "",
"api_key": "",
"model": "gpt-3.5-turbo",
"max_tokens": 2048,
"proxy": "http://127.0.0.1:10809",
"preset": "请扮演一个AI虚拟主播。不要回答任何敏感问题!不要强调你是主播,只需要回答问题!",
"history_enable": true,
"history_max_len": 300
},
"claude": {
"slack_user_token": "",
"bot_user_id": ""
Expand Down Expand Up @@ -692,7 +702,7 @@
}
},
"choose_song": {
"enable": true,
"enable": false,
"similarity": 0.5,
"start_cmd": [
"点歌 ",
Expand Down Expand Up @@ -1037,7 +1047,7 @@
}
},
"key_mapping": {
"enable": true,
"enable": false,
"type": "弹幕+回复",
"key_trigger_type": "关键词+礼物",
"key_single_sentence_trigger_once": true,
Expand Down Expand Up @@ -1607,7 +1617,8 @@
"gemini": true,
"qanything": true,
"koboldcpp": true,
"anythingllm": true
"anythingllm": true,
"gpt4free": true
},
"tts": {
"edge-tts": true,
Expand Down
17 changes: 14 additions & 3 deletions config.json.bak
Original file line number Diff line number Diff line change
Expand Up @@ -216,6 +216,16 @@
"frequency_penalty": 0.0,
"preset": "请扮演一个AI虚拟主播。不要回答任何敏感问题!不要强调你是主播,只需要回答问题!"
},
"gpt4free": {
"provider": "",
"api_key": "",
"model": "gpt-3.5-turbo",
"max_tokens": 2048,
"proxy": "http://127.0.0.1:10809",
"preset": "请扮演一个AI虚拟主播。不要回答任何敏感问题!不要强调你是主播,只需要回答问题!",
"history_enable": true,
"history_max_len": 300
},
"claude": {
"slack_user_token": "",
"bot_user_id": ""
Expand Down Expand Up @@ -692,7 +702,7 @@
}
},
"choose_song": {
"enable": true,
"enable": false,
"similarity": 0.5,
"start_cmd": [
"点歌 ",
Expand Down Expand Up @@ -1037,7 +1047,7 @@
}
},
"key_mapping": {
"enable": true,
"enable": false,
"type": "弹幕+回复",
"key_trigger_type": "关键词+礼物",
"key_single_sentence_trigger_once": true,
Expand Down Expand Up @@ -1607,7 +1617,8 @@
"gemini": true,
"qanything": true,
"koboldcpp": true,
"anythingllm": true
"anythingllm": true,
"gpt4free": true
},
"tts": {
"edge-tts": true,
Expand Down
107 changes: 107 additions & 0 deletions tests/test_gpt4free/api.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,107 @@
import json, logging
# pip install undetected_chromedriver platformdirs curl_cffi aiohttp_socks g4f
import g4f
from g4f.client import Client

# from utils.common import Common
# from utils.logger import Configure_logger


class GPT4Free:
    """LLM wrapper around the g4f (gpt4free) chat-completion client.

    Expected config keys: provider, api_key, model, max_tokens, proxy,
    preset, history_enable, history_max_len.
    """

    def __init__(self, data):
        self.config_data = data
        # Normalize empty string to None so g4f does not receive a bogus key.
        self.api_key = None if self.config_data["api_key"] == "" else self.config_data["api_key"]

        # Map config strings to g4f provider classes; "none" (or any unknown
        # value) falls back to None, which lets g4f auto-select a provider.
        provider_mapping = {
            "none": None,
            "g4f.Provider.Bing": g4f.Provider.Bing,
            "g4f.Provider.ChatgptAi": g4f.Provider.ChatgptAi,
        }

        proxy = None if data["proxy"] == "" else {"all": data["proxy"]}

        self.client = Client(provider=provider_mapping.get(data["provider"], None), proxies=proxy)

        # Conversation history: flat list of {"role": ..., "content": ...} dicts.
        self.history = []


    def get_resp(self, data):
        """Call the g4f API and return the text answer.

        Args:
            data (dict): {"prompt": str} — the user's message.

        Returns:
            str | None: the assistant's reply, or None on any error.
        """
        try:
            messages = [
                {"role": "system", "content": self.config_data["preset"]}
            ]

            if self.config_data["history_enable"]:
                messages.extend(self.history)

            messages.append({"role": "user", "content": data["prompt"]})

            response = self.client.chat.completions.create(
                # BUGFIX: honor the configured model instead of the
                # previously hard-coded "gpt-3.5-turbo".
                model=self.config_data["model"],
                max_tokens=self.config_data["max_tokens"],
                api_key=self.api_key,
                messages=messages
            )
            resp_content = response.choices[0].message.content

            if self.config_data["history_enable"]:
                self.history.append({"role": "user", "content": data["prompt"]})
                self.history.append({"role": "assistant", "content": resp_content})
                # Trim oldest messages until the total *content* characters fit
                # history_max_len, always keeping the latest user/assistant pair.
                # (BUGFIX: the old code iterated each message dict, summing the
                # lengths of the keys "role"/"content" instead of the text.)
                while (len(self.history) > 2 and
                       sum(len(msg["content"]) for msg in self.history) > self.config_data["history_max_len"]):
                    self.history.pop(0)

            return resp_content
        except Exception as e:
            logging.error(e)
            return None


if __name__ == '__main__':
    # Configure log output format for the ad-hoc smoke test below.
    logging.basicConfig(
        level=logging.DEBUG,  # adjust verbosity here if needed
        format="%(asctime)s [%(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    # Sample configuration mirroring the "gpt4free" section of config.json.
    data = {
        "provider": "none",
        "api_key": "",
        "model": "gpt-3.5-turbo",
        "max_tokens": 2048,
        "proxy": "http://127.0.0.1:10809",
        "preset": "你是一个虚拟主播",
        "history_enable": True,
        "history_max_len": 300
    }
    gpt4free = GPT4Free(data)

    # Two sequential prompts — the second one exercises history handling.
    logging.info(gpt4free.get_resp({"prompt": "你可以扮演猫娘吗,每句话后面加个喵"}))
    logging.info(gpt4free.get_resp({"prompt": "早上好"}))

2 changes: 2 additions & 0 deletions utils/gpt_model/gpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
from utils.gpt_model.qanything import QAnything
from utils.gpt_model.koboldcpp import Koboldcpp
from utils.gpt_model.anythingllm import AnythingLLM
from utils.gpt_model.gpt4free import GPT4Free

class GPT_Model:
openai = None
Expand All @@ -54,6 +55,7 @@ def set_model_config(self, model_name, config):
"qanything": QAnything,
"koboldcpp": Koboldcpp,
"anythingllm": AnythingLLM,
"gpt4free": GPT4Free,
}

if model_name == "openai":
Expand Down
125 changes: 125 additions & 0 deletions utils/gpt_model/gpt4free.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
import json, logging, traceback
# pip install undetected_chromedriver platformdirs curl_cffi aiohttp_socks g4f
import g4f
from g4f.client import Client

from utils.common import Common
from utils.logger import Configure_logger


class GPT4Free:
    """LLM wrapper around the g4f (gpt4free) chat-completion client.

    Expected config keys: provider, api_key, model, max_tokens, proxy,
    preset, history_enable, history_max_len.
    """

    def __init__(self, data):
        self.common = Common()
        # Log file path (Beijing-time dated, one file per day)
        file_path = "./log/log-" + self.common.get_bj_time(1) + ".txt"
        Configure_logger(file_path)

        self.config_data = data
        # Normalize empty string to None so g4f does not receive a bogus key.
        self.api_key = None if self.config_data["api_key"] == "" else self.config_data["api_key"]

        # Map config strings to g4f provider classes; "none" (or any unknown
        # value) falls back to None, which lets g4f auto-select a provider.
        provider_mapping = {
            "none": None,
            "g4f.Provider.Bing": g4f.Provider.Bing,
            "g4f.Provider.ChatgptAi": g4f.Provider.ChatgptAi,
            "g4f.Provider.Liaobots": g4f.Provider.Liaobots,
            "g4f.Provider.OpenaiChat": g4f.Provider.OpenaiChat,
            "g4f.Provider.Raycast": g4f.Provider.Raycast,
            "g4f.Provider.Theb": g4f.Provider.Theb,
            "g4f.Provider.You": g4f.Provider.You,
            "g4f.Provider.AItianhuSpace": g4f.Provider.AItianhuSpace,
            "g4f.Provider.ChatForAi": g4f.Provider.ChatForAi,
            "g4f.Provider.Chatgpt4Online": g4f.Provider.Chatgpt4Online,
            "g4f.Provider.ChatgptNext": g4f.Provider.ChatgptNext,
            "g4f.Provider.ChatgptX": g4f.Provider.ChatgptX,
            "g4f.Provider.FlowGpt": g4f.Provider.FlowGpt,
            "g4f.Provider.GptTalkRu": g4f.Provider.GptTalkRu,
            "g4f.Provider.Koala": g4f.Provider.Koala,
        }

        proxy = None if data["proxy"] == "" else {"all": data["proxy"]}

        self.client = Client(provider=provider_mapping.get(data["provider"], None), proxies=proxy)

        # Conversation history: flat list of {"role": ..., "content": ...} dicts.
        self.history = []


    def get_resp(self, data):
        """Call the g4f API and return the text answer.

        Args:
            data (dict): {"prompt": str} — the user's message.

        Returns:
            str | None: the assistant's reply, or None on any error.
        """
        try:
            messages = [
                {"role": "system", "content": self.config_data["preset"]}
            ]

            if self.config_data["history_enable"]:
                messages.extend(self.history)

            messages.append({"role": "user", "content": data["prompt"]})

            logging.debug(f"messages={messages}")

            response = self.client.chat.completions.create(
                # BUGFIX: honor the configured model instead of the
                # previously hard-coded "gpt-3.5-turbo".
                model=self.config_data["model"],
                max_tokens=self.config_data["max_tokens"],
                api_key=self.api_key,
                messages=messages
            )

            logging.debug(f"response={response}")

            resp_content = response.choices[0].message.content

            if self.config_data["history_enable"]:
                self.history.append({"role": "user", "content": data["prompt"]})
                self.history.append({"role": "assistant", "content": resp_content})
                # Trim oldest messages until the total *content* characters fit
                # history_max_len, always keeping the latest user/assistant pair.
                # (BUGFIX: the old code iterated each message dict, summing the
                # lengths of the keys "role"/"content" instead of the text.)
                while (len(self.history) > 2 and
                       sum(len(msg["content"]) for msg in self.history) > self.config_data["history_max_len"]):
                    self.history.pop(0)

            return resp_content
        except Exception:
            logging.error(traceback.format_exc())
            return None


if __name__ == '__main__':
    # Configure log output format for the ad-hoc smoke test below.
    logging.basicConfig(
        level=logging.DEBUG,  # adjust verbosity here if needed
        format="%(asctime)s [%(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    # Sample configuration mirroring the "gpt4free" section of config.json.
    data = {
        "provider": "none",
        "api_key": "",
        "model": "gpt-3.5-turbo",
        "max_tokens": 2048,
        "proxy": "http://127.0.0.1:10809",
        "preset": "你是一个虚拟主播",
        "history_enable": True,
        "history_max_len": 300
    }
    gpt4free = GPT4Free(data)

    # Two sequential prompts — the second one exercises history handling.
    logging.info(gpt4free.get_resp({"prompt": "你可以扮演猫娘吗,每句话后面加个喵"}))
    logging.info(gpt4free.get_resp({"prompt": "早上好"}))

2 changes: 1 addition & 1 deletion utils/gpt_model/tongyi.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ def get_resp(self, prompt):

return resp_content
else:
logging.error(f'Request id: {response.request_id}, Status code: {response.status_code}, error code: {response.code}, error message: {response.message}')
logging.error(f'出错,请查看message信息排查问题,已知问题有:输入数据可能包含不适当的内容\nRequest id: {response.request_id}, Status code: {response.status_code}, error code: {response.code}, error message: {response.message}')
return None
except Exception as e:
logging.error(traceback.format_exc())
Expand Down
7 changes: 5 additions & 2 deletions utils/my_handle.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,12 +133,13 @@ def __init__(self, config_path):
self.qanything = None
self.koboldcpp = None
self.anythingllm = None
self.gpt4free = None

self.image_recognition_model = None

self.chat_type_list = ["chatgpt", "claude", "claude2", "chatglm", "qwen", "chat_with_file", "text_generation_webui", \
"sparkdesk", "langchain_chatglm", "langchain_chatchat", "zhipu", "bard", "yiyan", "tongyi", \
"tongyixingchen", "my_qianfan", "my_wenxinworkshop", "gemini", "qanything", "koboldcpp", "anythingllm"]
"tongyixingchen", "my_qianfan", "my_wenxinworkshop", "gemini", "qanything", "koboldcpp", "anythingllm", "gpt4free"]

# 配置加载
self.config_load()
Expand Down Expand Up @@ -1196,7 +1197,8 @@ def llm_handle(self, chat_type, data, type="chat", webui_show=True):
"""
try:
resp_content = None
# print(f'''data: {data}''')

logging.debug(f"chat_type={chat_type}, data={data}")

if type == "chat":
# 使用 getattr 来动态获取属性
Expand Down Expand Up @@ -1228,6 +1230,7 @@ def llm_handle(self, chat_type, data, type="chat", webui_show=True):
"qanything": lambda: self.qanything.get_resp({"prompt": data["content"]}),
"koboldcpp": lambda: self.koboldcpp.get_resp({"prompt": data["content"]}),
"anythingllm": lambda: self.anythingllm.get_resp({"prompt": data["content"]}),
"gpt4free": lambda: self.gpt4free.get_resp({"prompt": data["content"]}),
"reread": lambda: data["content"]
}
elif type == "vision":
Expand Down
Loading

0 comments on commit af5389e

Please sign in to comment.