
Commit

Apply autopep8 formatting to entire codebase
Torantulino committed Apr 2, 2023
1 parent a2e5de7 commit b4685f6
Showing 11 changed files with 225 additions and 70 deletions.
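
A formatting pass like this is normally produced by running autopep8 over the source tree rather than by hand. The exact command isn't recorded in the commit, so the following is only a sketch, consistent with the paths in this diff and with the kinds of changes shown below (two blank lines between top-level definitions, two spaces before inline comments, long argument lists wrapped across lines):

    pip install autopep8
    # exact flags are not recorded in the commit; this is one plausible invocation
    autopep8 --in-place --recursive scripts/

Here --in-place rewrites each file directly and --recursive walks every .py file under scripts/. Whether an --aggressive level was also passed is left open; the default whitespace-level fixes already account for most of what follows.
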
15 changes: 10 additions & 5 deletions scripts/agent_manager.py
@@ -1,14 +1,16 @@
import openai

next_key = 0
agents = {} # key, (task, full_message_history, model)
agents = {} # key, (task, full_message_history, model)

# Create new GPT agent


def create_agent(task, prompt, model):
global next_key
global agents

messages = [{"role": "user", "content": prompt},]
messages = [{"role": "user", "content": prompt}, ]

# Start GTP3 instance
response = openai.ChatCompletion.create(
@@ -22,12 +24,15 @@ def create_agent(task, prompt, model):
messages.append({"role": "assistant", "content": agent_reply})

key = next_key
next_key += 1 # This is done instead of len(agents) to make keys unique even if agents are deleted
# This is done instead of len(agents) to make keys unique even if agents
# are deleted
next_key += 1

agents[key] = (task, messages, model)

return key, agent_reply


def message_agent(key, message):
global agents

@@ -50,12 +55,14 @@ def message_agent(key, message):

return agent_reply


def list_agents():
global agents

# Return a list of agent keys and their tasks
return [(key, task) for key, (task, _, _) in agents.items()]


def delete_agent(key):
global agents

@@ -64,5 +71,3 @@ def delete_agent(key):
return True
except KeyError:
return False


17 changes: 11 additions & 6 deletions scripts/ai_functions.py
@@ -2,10 +2,14 @@
import json
import openai

def call_ai_function(function, args, description, model = "gpt-4"):

def call_ai_function(function, args, description, model="gpt-4"):
# parse args to comma seperated string
args = ", ".join(args)
messages = [{"role": "system", "content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value."},{"role": "user", "content": args}]
messages = [{"role": "system",
"content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value."},
{"role": "user",
"content": args}]

response = openai.ChatCompletion.create(
model=model,
@@ -15,7 +19,8 @@ def call_ai_function(function, args, description, model = "gpt-4"):

return response.choices[0].message["content"]

### Evaluating code
# Evaluating code


def evaluate_code(code: str) -> List[str]:
function_string = "def analyze_code(code: str) -> List[str]:"
@@ -26,7 +31,7 @@ def evaluate_code(code: str) -> List[str]:
return json.loads(result_string)


### Improving code
# Improving code

def improve_code(suggestions: List[str], code: str) -> str:
function_string = "def generate_improved_code(suggestions: List[str], code: str) -> str:"
@@ -37,12 +42,12 @@ def improve_code(suggestions: List[str], code: str) -> str:
return result_string


### Writing tests
# Writing tests

def write_tests(code: str, focus: List[str]) -> str:
function_string = "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
args = [code, json.dumps(focus)]
description_string = """Generates test cases for the existing code, focusing on specific areas if required."""

result_string = call_ai_function(function_string, args, description_string)
return result_string
return result_string
49 changes: 37 additions & 12 deletions scripts/browse.py
@@ -1,7 +1,7 @@
from googlesearch import search
import requests
from bs4 import BeautifulSoup
from readability import Document#
from readability import Document
import openai


@@ -24,18 +24,21 @@ def scrape_text(url):

return text


def extract_hyperlinks(soup):
hyperlinks = []
for link in soup.find_all('a', href=True):
hyperlinks.append((link.text, link['href']))
return hyperlinks


def format_hyperlinks(hyperlinks):
formatted_links = []
for link_text, link_url in hyperlinks:
formatted_links.append(f"{link_text} ({link_url})")
return formatted_links


def scrape_links(url):
response = requests.get(url)

@@ -49,9 +52,10 @@ def scrape_links(url):
script.extract()

hyperlinks = extract_hyperlinks(soup)

return format_hyperlinks(hyperlinks)


def split_text(text, max_length=8192):
paragraphs = text.split("\n")
current_length = 0
@@ -69,22 +73,33 @@ def split_text(text, max_length=8192):
if current_chunk:
yield "\n".join(current_chunk)

def summarize_text(text, is_website = True):

def summarize_text(text, is_website=True):
if text == "":
return "Error: No text to summarize"

print("Text length: " + str(len(text)) + " characters")
summaries = []
chunks = list(split_text(text))

for i, chunk in enumerate(chunks):
print("Summarizing chunk " + str(i+1) + " / " + str(len(chunks)))
print("Summarizing chunk " + str(i + 1) + " / " + str(len(chunks)))
if is_website:
messages = [{"role": "user", "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specifc information this subpage contains.: " + chunk},]
messages = [
{
"role": "user",
"content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specifc information this subpage contains.: " +
chunk},
]
else:
messages = [{"role": "user", "content": "Please summarize the following text, focusing on extracting concise and specific information: " + chunk},]

response= openai.ChatCompletion.create(
messages = [
{
"role": "user",
"content": "Please summarize the following text, focusing on extracting concise and specific information: " +
chunk},
]

response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
max_tokens=300,
@@ -98,9 +113,19 @@ def summarize_text(text, is_website = True):

# Summarize the combined summary
if is_website:
messages = [{"role": "user", "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specifc information this subpage contains.: " + combined_summary},]
messages = [
{
"role": "user",
"content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specifc information this subpage contains.: " +
combined_summary},
]
else:
messages = [{"role": "user", "content": "Please summarize the following text, focusing on extracting concise and specific infomation: " + combined_summary},]
messages = [
{
"role": "user",
"content": "Please summarize the following text, focusing on extracting concise and specific infomation: " +
combined_summary},
]

response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
@@ -109,4 +134,4 @@ def summarize_text(text, is_website = True):
)

final_summary = response.choices[0].message.content
return final_summary
return final_summary
29 changes: 23 additions & 6 deletions scripts/chat.py
@@ -5,6 +5,7 @@
# Initialize the OpenAI API client
openai.api_key = keys.OPENAI_API_KEY


def create_chat_message(role, content):
"""
Create a chat message with the given role and content.
@@ -18,7 +19,14 @@ def create_chat_message(role, content):
"""
return {"role": role, "content": content}

def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, token_limit, debug = False):

def chat_with_ai(
prompt,
user_input,
full_message_history,
permanent_memory,
token_limit,
debug=False):
while True:
try:
"""
@@ -34,8 +42,12 @@ def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, tok
Returns:
str: The AI's response.
"""
current_context = [create_chat_message("system", prompt), create_chat_message("system", f"Permanent memory: {permanent_memory}")]
current_context.extend(full_message_history[-(token_limit - len(prompt) - len(permanent_memory) - 10):])
current_context = [
create_chat_message(
"system", prompt), create_chat_message(
"system", f"Permanent memory: {permanent_memory}")]
current_context.extend(
full_message_history[-(token_limit - len(prompt) - len(permanent_memory) - 10):])
current_context.extend([create_chat_message("user", user_input)])

# Debug print the current context
@@ -45,7 +57,8 @@ def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, tok
# Skip printing the prompt
if message["role"] == "system" and message["content"] == prompt:
continue
print(f"{message['role'].capitalize()}: {message['content']}")
print(
f"{message['role'].capitalize()}: {message['content']}")
print("----------- END OF CONTEXT ----------------")

response = openai.ChatCompletion.create(
@@ -56,8 +69,12 @@ def chat_with_ai(prompt, user_input, full_message_history, permanent_memory, tok
assistant_reply = response.choices[0].message["content"]

# Update full message history
full_message_history.append(create_chat_message("user", user_input))
full_message_history.append(create_chat_message("assistant", assistant_reply))
full_message_history.append(
create_chat_message(
"user", user_input))
full_message_history.append(
create_chat_message(
"assistant", assistant_reply))

return assistant_reply
except openai.RateLimitError:
(Diffs for the remaining 7 changed files were not loaded on this page.)

0 comments on commit b4685f6
