Commit c3d1103
Zhipu AI: added integration with the "应用" (Application) feature. Trends copywriting: the frontend can now add and delete copywriting groups, and a new LLM type option specifies which LLM the trends copywriting feature uses.
Ikaros-521 committed Feb 28, 2024
1 parent fa294ba commit c3d1103
Showing 10 changed files with 430 additions and 13 deletions.
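
At the configuration level the commit boils down to two new keys: zhipu.app_id for the Application integration and trends_copywriting.llm_type for choosing the LLM used by trends copywriting. Below is a minimal sketch of reading them with the standard json module; the project itself goes through its own config helper, so the file name and access pattern here are assumptions:

import json

# Read the two options introduced by this commit (paths follow the config.json diff below).
with open("config.json", "r", encoding="utf-8") as f:
    config = json.load(f)

app_id = config["zhipu"]["app_id"]                    # Zhipu "应用" (Application) ID
llm_type = config["trends_copywriting"]["llm_type"]   # LLM used for trends copywriting, e.g. "chatgpt"
print(app_id, llm_type)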
2 changes: 2 additions & 0 deletions config.json
@@ -206,6 +206,7 @@
"zhipu": {
"api_key": "",
"model": "chatglm_lite",
"app_id": "1761340125461340161",
"top_p": "0.7",
"temperature": "0.9",
"history_enable": true,
@@ -744,6 +745,7 @@
},
"trends_copywriting": {
"enable": false,
"llm_type": "chatgpt",
"copywriting": [
{
"folder_path": "data/动态文案1",
2 changes: 2 additions & 0 deletions config.json.bak
@@ -206,6 +206,7 @@
"zhipu": {
"api_key": "",
"model": "chatglm_lite",
"app_id": "1761340125461340161",
"top_p": "0.7",
"temperature": "0.9",
"history_enable": true,
@@ -744,6 +745,7 @@
},
"trends_copywriting": {
"enable": false,
"llm_type": "chatgpt",
"copywriting": [
{
"folder_path": "data/动态文案1",
2 changes: 1 addition & 1 deletion main.py
@@ -611,7 +611,7 @@ async def run_trends_copywriting():
}

# Call the LLM to process and generate the reply content, then synthesize audio; the implementation needs more careful thought
data_json["content"] = my_handle.llm_handle(config.get("chat_type"), data_json)
data_json["content"] = my_handle.llm_handle(config.get("trends_copywriting", "llm_type"), data_json)
else:
data_json = {
"username": "trends_copywriting",
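
The one-line change above switches trends copywriting from the global chat_type to its own llm_type. A minimal sketch of the flow follows, assuming config.get walks nested keys; the lookup helper and sample dict are illustrative only, and the real routing lives in my_handle.llm_handle:

# Illustrative sketch: nested config lookup feeding the LLM choice for trends copywriting.
def config_get(data: dict, *keys, default=None):
    """Walk nested keys, e.g. config_get(cfg, "trends_copywriting", "llm_type")."""
    cur = data
    for key in keys:
        if not isinstance(cur, dict) or key not in cur:
            return default
        cur = cur[key]
    return cur

cfg = {"trends_copywriting": {"enable": True, "llm_type": "chatgpt"}}
llm_type = config_get(cfg, "trends_copywriting", "llm_type")   # -> "chatgpt"
# my_handle.llm_handle(llm_type, data_json) would then route the request to the chosen LLM.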
3 changes: 2 additions & 1 deletion requirements.txt
@@ -70,4 +70,5 @@ pygtrans
jieba
gradio==4.16.0
TikTokLive
azure-cognitiveservices-speech
azure-cognitiveservices-speech
pyjwt
3 changes: 2 additions & 1 deletion requirements_common.txt
@@ -148,4 +148,5 @@ pygtrans==1.5.3
jieba==0.42.1
gradio==4.16.0
TikTokLive==5.0.8
azure-cognitiveservices-speech==1.35.0
azure-cognitiveservices-speech==1.35.0
pyjwt==2.8.0
123 changes: 121 additions & 2 deletions tests/test_zhipu/api.py
@@ -1,5 +1,13 @@
import zhipuai
import logging
import time
import jwt # make sure this is the PyJWT library
import requests
import traceback
from urllib.parse import urljoin

# from utils.common import Common
# from utils.logger import Configure_logger


class Zhipu:
@@ -9,6 +17,8 @@ def __init__(self, data):
# file_path = "./log/log-" + self.common.get_bj_time(1) + ".txt"
# Configure_logger(file_path)

self.config_data = data

zhipuai.api_key = data["api_key"]
self.model = data["model"]
self.top_p = float(data["top_p"])
@@ -21,8 +31,44 @@ def __init__(self, data):
self.bot_name = data["bot_name"]
self.user_name = data["user_name"]

# Non-SDK (direct HTTP) access
self.base_url = "https://open.bigmodel.cn"
self.token = None
self.headers = None
if self.model == "应用":
try:
self.token = self.generate_token(apikey=self.config_data["api_key"], exp_seconds=30 * 24 * 3600)

self.headers = {
"Authorization": f"Bearer {self.token}",
}

url = urljoin(self.base_url, "/api/llm-application/open/application")

data = {
"page": 1,
"size": 100
}

# GET request
response = requests.get(url=url, data=data, headers=self.headers)

logging.debug(response.json())

resp_json = response.json()

tmp_content = "智谱应用列表:"

for data in resp_json["data"]["list"]:
tmp_content += f"\n应用名:{data['name']},应用ID:{data['id']},知识库:{data['knowledge_ids']}"

logging.info(tmp_content)
except Exception as e:
logging.error(traceback.format_exc())

self.history = []

# Make a request to the Zhipu AI general large-model API
def invoke_example(self, prompt):
response = zhipuai.model_api.invoke(
model=self.model,
@@ -34,6 +80,7 @@ def invoke_example(self, prompt):

return response

# Make a characterglm request
def invoke_characterglm(self, prompt):
response = zhipuai.model_api.invoke(
model=self.model,
@@ -51,6 +98,7 @@ def invoke_characterglm(self, prompt):

return response


def async_invoke_example(self, prompt):
response = zhipuai.model_api.async_invoke(
model=self.model,
@@ -96,6 +144,28 @@ def query_async_invoke_result_example(self):

return response

# Non-SDK authentication
def generate_token(self, apikey: str, exp_seconds: int):
try:
id, secret = apikey.split(".")
except Exception as e:
raise Exception("invalid apikey", e)

payload = {
"api_key": id,
"exp": int(round(time.time())) + exp_seconds, # PyJWT中exp字段期望的是秒级的时间戳
"timestamp": int(round(time.time() * 1000)), # 如果需要毫秒级时间戳,可以保留这一行
}

# Encode the payload with PyJWT
token = jwt.encode(
payload,
secret,
headers={"alg": "HS256", "sign_type": "SIGN"}
)

return token


def get_resp(self, prompt):
"""请求对应接口,获取返回值
@@ -117,13 +187,61 @@ def get_resp(self, prompt):

if self.model == "characterglm":
ret = self.invoke_characterglm(data_json)
elif self.model == "应用":
url = urljoin(self.base_url, f"/api/llm-application/open/model-api/{self.config_data['app_id']}/invoke")

self.history.append({"role": "user", "content": prompt})
data = {
"prompt": self.history,
"returnType": "json_string",
# "knowledge_ids": [],
# "document_ids": []
}

response = requests.post(url=url, json=data, headers=self.headers)

try:
resp_json = response.json()

logging.debug(resp_json)

resp_content = resp_json["data"]["content"]

# If history is enabled, remember it!
if self.history_enable:
# Append the bot's reply to the history
self.history.append({"role": "assistant", "content": resp_content})

while True:
# Count the characters of all strings in the nested list
total_chars = sum(len(string) for sublist in self.history for string in sublist)
# If it exceeds the history length limit, drop the first two elements
if total_chars > self.history_max_len:
self.history.pop(0)
self.history.pop(0)
else:
break

return resp_content
except Exception as e:
def is_odd(number):
# Check whether the remainder of dividing by 2 is 1
return number % 2 != 0

# Keep the history at an even number of entries
if is_odd(len(self.history)):
self.history.pop(0)

logging.error(traceback.format_exc())
return None

else:
ret = self.invoke_example(data_json)

logging.debug(f"ret={ret}")

if False == ret['success']:
logging.error(f"请求zhipuai失败,错误代码:{ret['code']}{ret['msg']}")
logging.error(f"请求智谱ai失败,错误代码:{ret['code']}{ret['msg']}")
return None

# If history is enabled, remember it!
@@ -140,7 +258,7 @@ def get_resp(self, prompt):

return ret['data']['choices'][0]['content']
except Exception as e:
logging.error(e)
logging.error(traceback.format_exc())
return None


@@ -154,6 +272,7 @@ def get_resp(self, prompt):

data = {
"api_key": "",
"app_id": "1761340125461340161",
# chatglm_pro/chatglm_std/chatglm_lite/characterglm
"model": "characterglm",
"top_p": 0.7,
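
A short usage sketch for the non-SDK authentication added above: build a token the same way generate_token does, then decode it locally with PyJWT to inspect the claims. The api_key value is a placeholder, and signature verification is skipped because only the payload layout is being checked:

import time
import jwt  # PyJWT

def make_token(apikey: str, exp_seconds: int) -> str:
    # Mirrors Zhipu.generate_token above: HS256 with the custom "sign_type" header.
    key_id, secret = apikey.split(".")
    payload = {
        "api_key": key_id,
        "exp": int(round(time.time())) + exp_seconds,      # seconds, as PyJWT expects
        "timestamp": int(round(time.time() * 1000)),       # milliseconds
    }
    return jwt.encode(payload, secret, headers={"alg": "HS256", "sign_type": "SIGN"})

token = make_token("my_id.my_secret", 30 * 24 * 3600)      # placeholder api_key
claims = jwt.decode(token, options={"verify_signature": False})  # inspect the payload only
print(claims["api_key"], claims["exp"])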
112 changes: 112 additions & 0 deletions tests/test_zhipu/app_api.py
@@ -0,0 +1,112 @@
# import time
# import jwt

# def generate_token(apikey: str, exp_seconds: int):
# try:
# id, secret = apikey.split(".")
# except Exception as e:
# raise Exception("invalid apikey", e)

# payload = {
# "api_key": id,
# "exp": int(round(time.time() * 1000)) + exp_seconds * 1000,
# "timestamp": int(round(time.time() * 1000)),
# }

# tmp = jwt.JWT()

# return tmp.encode(
# payload,
# secret,
# alg="HS256",
# optional_headers={"alg": "HS256", "sign_type": "SIGN"},
# )

# print(generate_token("25571a2d0af8583121c3248be7e89d29.AuIX3O5ktKuQVMtC", 30 * 24 * 3600))

import time
import jwt # make sure this is the PyJWT library
import requests
from urllib.parse import urljoin

def generate_token(apikey: str, exp_seconds: int):
try:
id, secret = apikey.split(".")
except Exception as e:
raise Exception("invalid apikey", e)

payload = {
"api_key": id,
"exp": int(round(time.time())) + exp_seconds, # PyJWT中exp字段期望的是秒级的时间戳
"timestamp": int(round(time.time() * 1000)), # 如果需要毫秒级时间戳,可以保留这一行
}

# Encode the payload with PyJWT
token = jwt.encode(
payload,
secret,
headers={"alg": "HS256", "sign_type": "SIGN"}
)

return token


token = generate_token("", 30 * 24 * 3600)

print(token)

base_url = "https://open.bigmodel.cn"

headers = {
"Authorization": f"Bearer {token}",
}

url = urljoin(base_url, "/api/llm-application/open/application")

data = {
"page": 1,
"size": 20
}

# GET request
response = requests.get(url=url, data=data, headers=headers)

print(response.json())

resp_json = response.json()

tmp_content = "智谱应用列表:"

app_id = None

try:
for data in resp_json["data"]["list"]:
tmp_content += f"\n应用名:{data['name']},应用ID:{data['id']},知识库:{data['knowledge_ids']}"
app_id = data['id']

print(tmp_content)
except Exception as e:
print(e)

def get_resp(prompt):
url = urljoin(base_url, f"/api/llm-application/open/model-api/{app_id}/invoke")
data = {
"prompt": [{"role": "user", "content": prompt}],
"returnType": "json_string",
# "knowledge_ids": [],
# "document_ids": []
}

response = requests.post(url=url, json=data, headers=headers)

try:
print(response.json())

resp_json = response.json()
resp_content = resp_json["data"]["content"]

print(resp_content)
except Exception as e:
print(e)

get_resp("伊卡洛斯和妮姆芙的关系")
4 changes: 2 additions & 2 deletions utils/common.py
@@ -733,11 +733,11 @@ def write_content_to_file(self, file_path, content, write_log=True):
try:
with open(file_path, 'w', encoding='utf-8') as file:
file.write(content)
logging.info(f"内容已成功写入文件:{file_path}")
logging.info(f"写入文件:{file_path},内容:【{content}")

return True
except IOError as e:
logging.error(f"无法写入文件:{file_path}\n{e}")
logging.error(f"无法写入 【{content}】 到文件:{file_path}\n{e}")
return False

# Move a file to the specified path (src -> dest)