Skip to content

Commit

Permalink
feat: add Bot App Home
Browse files Browse the repository at this point in the history
  • Loading branch information
madawei2699 committed Apr 1, 2023
1 parent e61969a commit 84f02c9
Show file tree
Hide file tree
Showing 4 changed files with 137 additions and 21 deletions.
4 changes: 3 additions & 1 deletion app/daily_hot_news.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,9 @@ def cut_string(text):

def get_summary_from_gpt_thread(url):
    """Summarize the article at *url* in Chinese via the llama-index web pipeline.

    Returns the summary as a plain string. Token-usage counts reported by the
    pipeline are logged but not returned.
    """
    news_summary_prompt = '请用中文简短概括这篇文章的内容。'
    # NOTE(review): the diff artifact left an unconditional early `return` here,
    # which made the lines below unreachable — removed.
    # get_answer_from_llama_web returns (answer, llm_tokens, embedding_tokens).
    gpt_response, total_llm_model_tokens, total_embedding_model_tokens = get_answer_from_llama_web([news_summary_prompt], [url])
    # Fixed: the log message previously had an unbalanced '(' in its text.
    logging.info(f"=====> GPT response: {gpt_response} (total_llm_model_tokens: {total_llm_model_tokens}, total_embedding_model_tokens: {total_embedding_model_tokens})")
    return str(gpt_response)

def get_summary_from_gpt(url):
with concurrent.futures.ThreadPoolExecutor() as executor:
Expand Down
43 changes: 25 additions & 18 deletions app/gpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,17 +78,6 @@ def get_documents_from_urls(urls):
documents.append(Document(f"Can't get transcript from youtube video: {url}"))
return documents

def get_answer_from_chatGPT(messages):
    """Answer a formatted dialog by calling the OpenAI gpt-3.5-turbo chat endpoint.

    The dialog history is flattened into a single user message; the raw answer
    text is returned and token usage is logged.
    """
    dialog_messages = format_dialog_messages(messages)
    logging.info('=====> Use chatGPT to answer!')
    logging.info(dialog_messages)
    request_messages = [{"role": "user", "content": dialog_messages}]
    completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=request_messages)
    logging.info(completion.usage)
    first_choice = completion.choices[0]
    return first_choice.message.content

def get_index_from_web_cache(name):
web_cache_file = index_cache_web_dir / name
if not web_cache_file.is_file():
Expand All @@ -107,6 +96,23 @@ def get_index_from_file_cache(name):
f"=====> Get index from file cache: {file_cache_file}")
return index

def get_index_name_from_file(file: str):
    """Derive the JSON index filename for a cached file.

    The cached file's basename (relative to ``index_cache_file_dir``) is an md5
    hash plus an extension; the index name is that md5 stem with '.json'.
    """
    basename = str(Path(file).relative_to(index_cache_file_dir).name)
    md5_stem = basename.split('.', 1)[0]
    return f"{md5_stem}.json"

def get_answer_from_chatGPT(messages):
    """Answer a formatted dialog via the OpenAI gpt-3.5-turbo chat endpoint.

    Returns a 3-tuple ``(answer_text, total_tokens, None)``; the third slot is
    None because this path uses no embedding model.
    """
    dialog_messages = format_dialog_messages(messages)
    logging.info('=====> Use chatGPT to answer!')
    logging.info(dialog_messages)
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": dialog_messages}],
    )
    usage = completion.usage
    logging.info(usage)
    answer = completion.choices[0].message.content
    return answer, usage.total_tokens, None

def get_answer_from_llama_web(messages, urls):
dialog_messages = format_dialog_messages(messages)
lang_code = get_language_code(remove_prompt_from_text(messages[-1]))
Expand All @@ -128,12 +134,10 @@ def get_answer_from_llama_web(messages, urls):
logging.info(dialog_messages)
logging.info('=====> text_qa_template')
logging.info(prompt)
return index.query(dialog_messages, llm_predictor=llm_predictor, text_qa_template=prompt)

def get_index_name_from_file(file: str):
file_md5_with_extension = str(Path(file).relative_to(index_cache_file_dir).name)
file_md5 = file_md5_with_extension.split('.')[0]
return file_md5 + '.json'
answer = index.query(dialog_messages, llm_predictor=llm_predictor, text_qa_template=prompt)
total_llm_model_tokens = llm_predictor.last_token_usage
total_embedding_model_tokens = index.embed_model.last_token_usage
return answer, total_llm_model_tokens, total_embedding_model_tokens

def get_answer_from_llama_file(messages, file):
dialog_messages = format_dialog_messages(messages)
Expand All @@ -153,7 +157,10 @@ def get_answer_from_llama_file(messages, file):
logging.info(dialog_messages)
logging.info('=====> text_qa_template')
logging.info(prompt)
return index.query(dialog_messages, llm_predictor=llm_predictor, text_qa_template=prompt)
answer = index.query(dialog_messages, llm_predictor=llm_predictor, text_qa_template=prompt)
total_llm_model_tokens = llm_predictor.last_token_usage
total_embedding_model_tokens = index.embed_model.last_token_usage
return answer, total_llm_model_tokens, total_embedding_model_tokens

def get_text_from_whisper(voice_file_path):
with open(voice_file_path, "rb") as f:
Expand Down
58 changes: 56 additions & 2 deletions app/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
from app.rate_limiter import RateLimiter
from app.slash_command import register_slack_slash_commands
from app.ttl_set import TtlSet
from app.user import get_user, update_message_token_usage
from app.util import md5

class Config:
Expand Down Expand Up @@ -145,6 +146,24 @@ def format_dialog_text(text, voicemessage=None):
return voicemessage if voicemessage else ''
return insert_space(text.replace("<@U051JKES6Q1>", "")) + ('\n' + voicemessage if voicemessage else '')

def generate_message_id(channel, thread_ts):
    """Build the per-message identifier used for token-usage bookkeeping."""
    return "{}-{}".format(channel, thread_ts)

def update_token_usage(event, total_llm_model_tokens, total_embedding_model_tokens):
    """Best-effort: persist a Slack message's token usage via the user service.

    Never raises — token accounting must not break message handling, so any
    failure is logged and swallowed.
    """
    try:
        user = event["user"]
        message_id = generate_message_id(event["channel"], event["ts"])
        # Classify the message: plain text by default, 'file' when there is no
        # text, upgraded to 'voice' when the first attachment has an allowed
        # voice filetype.
        message_type = 'text' if 'text' in event else 'file'
        if 'files' in event:
            filetype = event['files'][0]["filetype"]
            if filetype in filetype_voice_extension_allowed:
                message_type = 'voice'
        result = update_message_token_usage(user, message_id, message_type, total_llm_model_tokens, total_embedding_model_tokens)
        if not result:
            logging.error(f"Failed to update message token usage for {message_id}")
    except Exception as e:
        # Fixed: logging.error(e) dropped the traceback; logging.exception
        # records it so failures here are debuggable.
        logging.exception(e)

def bot_process(event, say, logger):
user = event["user"]
thread_ts = event["ts"]
Expand Down Expand Up @@ -207,7 +226,8 @@ def bot_process(event, say, logger):
future = executor.submit(get_answer_from_chatGPT, thread_message_history[parent_thread_ts]['dialog_texts'])

try:
gpt_response = future.result(timeout=300)
gpt_response, total_llm_model_tokens, total_embedding_model_tokens = future.result(timeout=300)
update_token_usage(event, total_llm_model_tokens, total_embedding_model_tokens)
update_thread_history(parent_thread_ts, 'chatGPT: %s' % insert_space(f'{gpt_response}'))
logger.info(gpt_response)
if voicemessage is None:
Expand Down Expand Up @@ -257,16 +277,50 @@ def log_message(logger, event, say):
@slack_app.event("app_home_opened")
def update_home_tab(client, event, logger):
try:
user_info = get_user(event["user"])
if user_info is None:
user_type = user_info['user_type']
llm_token_usage = user_info['llm_token_usage']
embedding_token_usage = user_info['embedding_token_usage']
message_count = user_info['message_count']
client.views_publish(
user_id=event["user"],
view={
"type": "home",
"blocks": [
{
"type": "header",
"text": {
"type": "plain_text",
"text": "This month's usage",
}
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"*User Type:* {user_type or ''}"
}
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"*User llm token usage:* {llm_token_usage or ''}"
}
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"*User embedding token usage:* {embedding_token_usage or ''}"
}
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*Welcome home, <@" + event["user"] + "> :house:*"
"text": f"*User message count:* {message_count or ''}"
}
}
]
Expand Down
53 changes: 53 additions & 0 deletions app/user.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
import os
import requests

CF_ACCESS_CLIENT_ID = os.environ.get('CF_ACCESS_CLIENT_ID')
CF_ACCESS_CLIENT_SECRET = os.environ.get('CF_ACCESS_CLIENT_SECRET')

def update_message_token_usage(user_id, message_id, message_type, llm_token_usage=0, embedding_token_usage=0) -> bool:
    """Report one message's token usage to the myreader API.

    Returns True on success and False on any failure, matching the declared
    ``-> bool``. (Previously failure paths returned non-empty error *strings*,
    which are truthy — callers checking ``if not result:`` treated every
    failure as success.)
    """
    endpoint_url = "https://api.myreader.io/api/message"
    headers = {
        'CF-Access-Client-Id': CF_ACCESS_CLIENT_ID,
        'CF-Access-Client-Secret': CF_ACCESS_CLIENT_SECRET,
    }
    payload = {
        'user': {
            "user_from": "slack",
            "user_platform_id": user_id
        },
        "message": {
            "message_platform_id": message_id,
            "message_type": message_type,
            "llm_token_usage": llm_token_usage,
            "embedding_token_usage": embedding_token_usage
        }
    }
    try:
        # Fixed: `json=` serializes the nested payload as a JSON body; the old
        # `data=` kwarg form-encodes and cannot represent nested dicts.
        # timeout so a stalled API cannot hang the caller indefinitely.
        response = requests.post(endpoint_url, headers=headers, json=payload, timeout=30)
    except requests.RequestException:
        return False
    if response.status_code != 200:
        return False
    try:
        json_response = response.json()
    except ValueError:  # response body is not valid JSON
        return False
    return 'error' not in json_response

def get_user(user_id):
    """Fetch a Slack user's record (user_type, token usage, counts) from the myreader API.

    Returns the parsed JSON dict on success, or None when the request fails,
    the response is not valid JSON, or the API reports an error. (Previously
    some failure paths returned error *strings*, which callers would then try
    to index like dicts.)
    """
    endpoint_url = f"https://api.myreader.io/api/user/slack/{user_id}"
    headers = {
        'CF-Access-Client-Id': CF_ACCESS_CLIENT_ID,
        'CF-Access-Client-Secret': CF_ACCESS_CLIENT_SECRET,
    }
    try:
        # timeout so a stalled API cannot hang the Slack event handler.
        response = requests.get(endpoint_url, headers=headers, timeout=30)
    except requests.RequestException:
        return None
    if response.status_code != 200:
        return None
    try:
        json_response = response.json()
    except ValueError:  # response body is not valid JSON
        return None
    if 'error' in json_response:
        return None
    return json_response

0 comments on commit 84f02c9

Please sign in to comment.