Consolidate calls to openai
Starting to abstract away the calls to openai
Taytay committed Apr 3, 2023
1 parent 744c5fa commit ae9448c
Showing 5 changed files with 29 additions and 17 deletions.
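
In short, every call site that previously created an openai.ChatCompletion and unwrapped the response by hand now calls a single helper, create_chat_completion, which returns the reply string directly. A minimal before/after sketch (illustrative only; variable names taken from the diffs below):

    # Before: call the OpenAI SDK directly, then unwrap the reply
    response = openai.ChatCompletion.create(model=model, messages=messages)
    reply = response.choices[0].message["content"]

    # After: the helper wraps the SDK call and returns the content string
    reply = create_chat_completion(messages=messages, model=model)
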
10 changes: 3 additions & 7 deletions scripts/agent_manager.py
@@ -1,4 +1,5 @@
 import openai
+from llm_utils import create_chat_completion
 
 next_key = 0
 agents = {}  # key, (task, full_message_history, model)
@@ -13,13 +14,11 @@ def create_agent(task, prompt, model):
     messages = [{"role": "user", "content": prompt}, ]
 
     # Start GPT-3 instance
-    response = openai.ChatCompletion.create(
+    agent_reply = create_chat_completion(
         model=model,
         messages=messages,
     )
 
-    agent_reply = response.choices[0].message["content"]
-
     # Update full message history
     messages.append({"role": "assistant", "content": agent_reply})
 
@@ -42,14 +41,11 @@ def message_agent(key, message):
     messages.append({"role": "user", "content": message})
 
     # Start GPT-3 instance
-    response = openai.ChatCompletion.create(
+    agent_reply = create_chat_completion(
         model=model,
         messages=messages,
     )
 
-    # Get agent response
-    agent_reply = response.choices[0].message["content"]
-
     # Update full message history
     messages.append({"role": "assistant", "content": agent_reply})
 
8 changes: 3 additions & 5 deletions scripts/browse.py
@@ -4,6 +4,7 @@
 from readability import Document
 import openai
 from config import Config
+from llm_utils import create_chat_completion
 
 cfg = Config()
 
@@ -101,13 +102,11 @@ def summarize_text(text, is_website=True):
                 chunk},
         ]
 
-        response = openai.ChatCompletion.create(
+        summary = create_chat_completion(
             model=cfg.fast_llm_model,
             messages=messages,
             max_tokens=300,
         )
 
-        summary = response.choices[0].message.content
         summaries.append(summary)
         print("Summarized " + str(len(chunks)) + " chunks.")
 
@@ -129,11 +128,10 @@ def summarize_text(text, is_website=True):
             combined_summary},
     ]
 
-    response = openai.ChatCompletion.create(
+    final_summary = create_chat_completion(
         model=cfg.fast_llm_model,
         messages=messages,
         max_tokens=300,
     )
 
-    final_summary = response.choices[0].message.content
     return final_summary
6 changes: 4 additions & 2 deletions scripts/call_ai_function.py
@@ -3,6 +3,8 @@
 from config import Config
 cfg = Config()
 
+from llm_utils import create_chat_completion
+
 # This is a magic function that can do anything with no-code. See
 # https://github.com/Torantulino/AI-Functions for more info.
 def call_ai_function(function, args, description, model=cfg.smart_llm_model):
@@ -18,8 +20,8 @@ def call_ai_function(function, args, description, model=cfg.smart_llm_model):
         {"role": "user", "content": args},
     ]
 
-    response = openai.ChatCompletion.create(
+    response = create_chat_completion(
         model=model, messages=messages, temperature=0
     )
 
-    return response.choices[0].message["content"]
+    return response
6 changes: 3 additions & 3 deletions scripts/chat.py
@@ -5,6 +5,7 @@
 from config import Config
 cfg = Config()
 
+from llm_utils import create_chat_completion
 
 def create_chat_message(role, content):
     """
@@ -62,13 +63,11 @@ def chat_with_ai(
             print("----------- END OF CONTEXT ----------------")
 
             # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
-            response = openai.ChatCompletion.create(
+            assistant_reply = create_chat_completion(
                 model=cfg.smart_llm_model,
                 messages=current_context,
            )
 
-            assistant_reply = response.choices[0].message["content"]
-
             # Update full message history
             full_message_history.append(
                 create_chat_message(
@@ -79,5 +78,6 @@ def chat_with_ai(
 
             return assistant_reply
         except openai.error.RateLimitError:
+            # TODO: When we switch to langchain, this is built in
             print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
             time.sleep(10)
16 changes: 16 additions & 0 deletions scripts/llm_utils.py
@@ -0,0 +1,16 @@
+import openai
+from config import Config
+cfg = Config()
+
+openai.api_key = cfg.openai_api_key
+
+# Overly simple abstraction until we create something better
+def create_chat_completion(messages, model=None, temperature=None, max_tokens=None) -> str:
+    response = openai.ChatCompletion.create(
+        model=model,
+        messages=messages,
+        temperature=temperature,
+        max_tokens=max_tokens
+    )
+
+    return response.choices[0].message["content"]
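
For illustration, a hypothetical call to the new helper (not part of this commit; the model and max_tokens values mirror the browse.py call site):

    from llm_utils import create_chat_completion
    from config import Config

    cfg = Config()
    summary = create_chat_completion(
        messages=[{"role": "user", "content": "Please summarize the following text: ..."}],
        model=cfg.fast_llm_model,
        max_tokens=300,  # optional; forwarded to openai.ChatCompletion.create
    )
    print(summary)  # returns the message content string, not the raw API response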
