Make compatible with gpt-3.5
I made the JSON parsing more forgiving. I also improved the prompt, using things I learned from Koobah/Auto-GPT.
Taytay committed Apr 2, 2023
1 parent f808710 commit 9ff7e59
Showing 5 changed files with 240 additions and 42 deletions.
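The new scripts/json_parser.py that main.py and the test suite import is not one of the five files shown in this diff. As a rough sketch only, a "forgiving" fix_and_parse_json along the lines the tests imply could first try strict parsing, then the dirtyjson library, and only then fall back to the GPT-based fix_json added in scripts/ai_functions.py below; everything beyond the names visible in this commit is an assumption.

import json

import dirtyjson  # lenient JSON parser also used by fix_json below


def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
    """Sketch only: parse strictly, then leniently, then fall back to the LLM."""
    try:
        return json.loads(json_str)
    except json.JSONDecodeError:
        pass
    try:
        # dirtyjson tolerates common problems such as trailing commas
        return dict(dirtyjson.loads(json_str))
    except Exception:
        if not try_to_fix_with_gpt:
            raise
    # Last resort: ask the model to rewrite the string (fix_json is defined
    # in scripts/ai_functions.py later in this diff)
    from ai_functions import fix_json
    return fix_json(json_str)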
56 changes: 56 additions & 0 deletions scripts/ai_config.py
@@ -0,0 +1,56 @@
import yaml
import data

class AIConfig:
def __init__(self, ai_name="", ai_role="", ai_goals=[]):
self.ai_name = ai_name
self.ai_role = ai_role
self.ai_goals = ai_goals

# @classmethod
# def create_from_user_prompts(cls):
# ai_name = input("Name your AI: ") or "Entrepreneur-GPT"
# ai_role = input(f"{ai_name} is: ") or "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
# print("Enter up to 5 goals for your AI: ")
# print("For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
# print("Enter nothing to load defaults, enter nothing when finished.")
# ai_goals = []
# for i in range(5):
# ai_goal = input(f"Goal {i+1}: ")
# if ai_goal == "":
# break
# ai_goals.append(ai_goal)
# if len(ai_goals) == 0:
# ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"]
# return cls(ai_name, ai_role, ai_goals)

@classmethod
def load(cls, config_file="config.yaml"):
# Load variables from yaml file if it exists
try:
with open(config_file) as file:
config_params = yaml.load(file, Loader=yaml.FullLoader)
except FileNotFoundError:
config_params = {}

ai_name = config_params.get("ai_name", "")
ai_role = config_params.get("ai_role", "")
ai_goals = config_params.get("ai_goals", [])

return cls(ai_name, ai_role, ai_goals)

def save(self, config_file="config.yaml"):
config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}
with open(config_file, "w") as file:
documents = yaml.dump(config, file)

def construct_full_prompt(self):
prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""

# Construct full prompt
full_prompt = f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
for i, goal in enumerate(self.ai_goals):
full_prompt += f"{i+1}. {goal}\n"

full_prompt += f"\n\n{data.load_prompt()}"
return full_prompt
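For orientation, a minimal usage sketch of the new AIConfig class; the name, role, and goal strings are illustrative, and it assumes the snippet runs from the scripts/ directory so that data.load_prompt() inside construct_full_prompt() can find prompt.txt.

from ai_config import AIConfig

# Build a config in code, persist it, then reload it
config = AIConfig(
    ai_name="Entrepreneur-GPT",
    ai_role="an AI designed to autonomously develop and run businesses",
    ai_goals=["Increase net worth", "Grow Twitter Account"],
)
config.save("config.yaml")               # writes the three fields as YAML
config = AIConfig.load("config.yaml")    # a missing file falls back to empty defaults
print(config.construct_full_prompt())    # role, constraints, numbered GOALS, then prompt.txt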
34 changes: 33 additions & 1 deletion scripts/ai_functions.py
@@ -1,11 +1,16 @@
from typing import List, Optional
import json
import openai
import dirtyjson
from config import Config

cfg = Config()

# This is a magic function that can do anything with no-code. See
# https://github.com/Torantulino/AI-Functions for more info.
-def call_ai_function(function, args, description, model="gpt-4"):
+def call_ai_function(function, args, description, model=cfg.smart_llm_model):
# For each arg, if any are None, convert to "None":
args = [str(arg) if arg is not None else "None" for arg in args]
# parse args to comma seperated string
args = ", ".join(args)
messages = [
@@ -61,3 +66,30 @@ def write_tests(code: str, focus: List[str]) -> str:

result_string = call_ai_function(function_string, args, description_string)
return result_string


# TODO: Make debug a global config var
def fix_json(json_str: str, schema:str = None, debug=True) -> str:
# Try to fix the JSON using gpt:
function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
args = [json_str, schema]
description_string = """Fixes the provided JSON string to make it parseable. If the schema is provided, the JSON will be made to look like the schema, otherwise it will be made to look like a valid JSON object."""

result_string = call_ai_function(
function_string, args, description_string, model=cfg.fast_llm_model
)
if debug:
print("------------ JSON FIX ATTEMPT ---------------")
print(f"Original JSON: {json_str}")
print(f"Fixed JSON: {result_string}")
print("----------- END OF FIX ATTEMPT ----------------")
try:
return dirtyjson.loads(result_string)
except:
# Log the exception:
print("Failed to fix JSON")
# Get the call stack:
import traceback
call_stack = traceback.format_exc()
print(call_stack)
return {}
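A usage sketch for the new fix_json helper; the broken input and schema below are made up. Note that despite the -> str annotation the function returns a parsed object on success (or {} on failure), and calling it for real requires an OpenAI key since it goes through call_ai_function.

from ai_functions import fix_json

broken = '{"command": {"name": "google", "args": {"input": "weather"'  # unbalanced braces
schema = '{"command": {"name": "string", "args": {}}, "thoughts": {}}'

# Asks cfg.fast_llm_model to rewrite `broken` so that it matches `schema`,
# then parses the reply with dirtyjson; any failure prints a traceback and yields {}
result = fix_json(broken, schema)
print(result)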
36 changes: 19 additions & 17 deletions scripts/data/prompt.txt
@@ -1,6 +1,6 @@
CONSTRAINTS:

-1. 6000-word count limit for memory
+1. 4000-word count limit for memory
2. No user assistance

COMMANDS:
@@ -38,22 +38,24 @@ PERFORMANCE EVALUATION:
3. Reflect on past decisions and strategies to refine your approach.
4. Every command has a cost, so be smart and efficent. Aim to complete tasks in the least number of steps.

+You should only respond in JSON format as described below
+
RESPONSE FORMAT:
{
"command":
{
"name": "command name",
"args":
{
"arg name": "value"
"command": {
"name": "command name",
"args":{
"arg name": "value"
}
},
"thoughts":
{
"text": "thought",
"reasoning": "reasoning",
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
"criticism": "constructive self-criticism"
"speak": "thoughts summary to say to user"
}
}
},
"thoughts":
{
"text": "thought",
"reasoning": "reasoning",
"plan": "short bulleted long-term plan",
"criticism": "constructive self-criticism"
"speak": "thoughts summary to say to user"
}
}

Ensure the response can be parsed by Python json.loads
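As a sanity check on the new format, a reply that follows it parses with json.loads, and the escaped newlines in "plan" split into the bullets that main.py prints. The command and thought values here are made up, and unlike the example above this one includes the comma that is missing after the "criticism" line.

import json

reply = '''{
    "command": {"name": "google", "args": {"input": "latest AI news"}},
    "thoughts": {
        "text": "I should research the topic first",
        "reasoning": "Fresh information leads to a better plan",
        "plan": "- search the web\\n- summarise findings\\n- decide the next step",
        "criticism": "Do not spend too many steps on research",
        "speak": "I will start by searching the web"
    }
}'''

parsed = json.loads(reply)
for bullet in parsed["thoughts"]["plan"].split("\n"):
    print(bullet)  # each "- ..." item on its own line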
122 changes: 98 additions & 24 deletions scripts/main.py
@@ -11,16 +11,15 @@
from enum import Enum, auto
import sys
from config import Config

from json_parser import fix_and_parse_json
from ai_config import AIConfig
import traceback
import yaml

class Argument(Enum):
CONTINUOUS_MODE = "continuous-mode"
SPEAK_MODE = "speak-mode"

-# normally 6000 for gpt-4
-TOKEN_LIMIT=4000


def print_to_console(
title,
title_color,
@@ -53,7 +52,7 @@ def print_assistant_thoughts(assistant_reply):
global cfg
try:
# Parse and print Assistant response
-assistant_reply_json = json.loads(assistant_reply)
+assistant_reply_json = fix_and_parse_json(assistant_reply)

assistant_thoughts = assistant_reply_json.get("thoughts")
if assistant_thoughts:
@@ -80,8 +79,13 @@ def print_assistant_thoughts(assistant_reply):
if assistant_thoughts_plan:
print_to_console("PLAN:", Fore.YELLOW, "")
if assistant_thoughts_plan:

-# Split the input_string using the newline character and dash
+# If it's a list, join it into a string
+if isinstance(assistant_thoughts_plan, list):
+assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
+elif isinstance(assistant_thoughts_plan, dict):
+assistant_thoughts_plan = str(assistant_thoughts_plan)
+# Split the input_string using the newline character and dash

lines = assistant_thoughts_plan.split('\n')

# Iterate through the lines and print each one with a bullet
@@ -103,11 +107,89 @@
print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
# All other errors, return "Error: + error message"
except Exception as e:
print_to_console("Error: \n", Fore.RED, str(e))
call_stack = traceback.format_exc()
print_to_console("Error: \n", Fore.RED, call_stack)

def load_variables(config_file="config.yaml"):
# Load variables from yaml file if it exists
try:
with open(config_file) as file:
config = yaml.load(file, Loader=yaml.FullLoader)
ai_name = config.get("ai_name")
ai_role = config.get("ai_role")
ai_goals = config.get("ai_goals")
except FileNotFoundError:
ai_name = ""
ai_role = ""
ai_goals = []

# Prompt the user for input if config file is missing or empty values
if not ai_name:
ai_name = input("Name your AI: ")
if ai_name == "":
ai_name = "Entrepreneur-GPT"

if not ai_role:
ai_role = input(f"{ai_name} is: ")
if ai_role == "":
ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."

if not ai_goals:
print("Enter up to 5 goals for your AI: ")
print("For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
print("Enter nothing to load defaults, enter nothing when finished.")
ai_goals = []
for i in range(5):
ai_goal = input(f"Goal {i+1}: ")
if ai_goal == "":
break
ai_goals.append(ai_goal)
if len(ai_goals) == 0:
ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"]

# Save variables to yaml file
config = {"ai_name": ai_name, "ai_role": ai_role, "ai_goals": ai_goals}
with open(config_file, "w") as file:
documents = yaml.dump(config, file)

prompt = data.load_prompt()
prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""

# Construct full prompt
full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
for i, goal in enumerate(ai_goals):
full_prompt += f"{i+1}. {goal}\n"

full_prompt += f"\n\n{prompt}"
return full_prompt


def construct_prompt():
config = AIConfig.load()
if config.ai_name:
print_to_console(
f"Welcome back, {config.ai_name}!",
Fore.GREEN,
"Let's continue our journey.",
speak_text=True)
should_continue = input(f"Continue with the last settings? (Settings: {config.ai_name}, {config.ai_role}, {config.ai_goals}) (Y/n): ")
if should_continue.lower() == "n":
config = AIConfig()

if not config.ai_name:
config = prompt_user()
config.save()

# Get rid of this global:
global ai_name
ai_name = config.ai_name

full_prompt = config.construct_full_prompt()
return full_prompt


def prompt_user():
ai_name = ""
# Construct the prompt
print_to_console(
"Welcome to Auto-GPT! ",
@@ -155,19 +237,8 @@ def construct_prompt():
ai_goals = ["Increase net worth", "Grow Twitter Account",
"Develop and manage multiple businesses autonomously"]

-prompt = data.load_prompt()
-prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
-
-# Construct full prompt
-full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
-for i, goal in enumerate(ai_goals):
-full_prompt += f"{i+1}. {goal}\n"
-
-full_prompt += f"\n\n{prompt}"
-return full_prompt

# Check if the python script was executed with arguments, get those arguments

+config = AIConfig(ai_name, ai_role, ai_goals)
+return config

def parse_arguments():
global cfg
@@ -185,6 +256,8 @@ def parse_arguments():
print_to_console("Speak Mode: ", Fore.GREEN, "ENABLED")
cfg.set_speak_mode(True)

# TODO: Better argument parsing:
# TODO: fill in llm values here

cfg = Config()

@@ -194,9 +267,10 @@ def parse_arguments():
print(prompt)
# Initialize variables
full_message_history = []
-token_limit = TOKEN_LIMIT # The maximum number of tokens allowed in the API call
+token_limit = cfg.thinking_token_limit # The maximum number of tokens allowed in the API call
result = None
user_input = "GENERATE NEXT COMMAND JSON"
# Make a constant:
user_input = "Determine which next command to use, and respond using the format specified above:"

# Interaction Loop
while True:
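main.py and ai_functions.py now read cfg.smart_llm_model, cfg.fast_llm_model, and cfg.thinking_token_limit, but scripts/config.py itself is not among the changed files. A minimal sketch of what those settings could look like; the environment variable names and the gpt-3.5-turbo and 4000-token defaults are assumptions in the spirit of the gpt-3.5 goal, not part of this commit.

import os


class Config:
    """Sketch of the settings this commit assumes exist in scripts/config.py."""

    def __init__(self):
        self.continuous_mode = False
        self.speak_mode = False
        # "Smart" model for heavy reasoning, "fast" model for cheap calls such as fix_json
        self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
        self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
        # Token budget for the chat context, lowered from the gpt-4 era 6000
        self.thinking_token_limit = int(os.getenv("THINKING_TOKEN_LIMIT", "4000"))

    def set_continuous_mode(self, value: bool):
        self.continuous_mode = value

    def set_speak_mode(self, value: bool):
        self.speak_mode = value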
34 changes: 34 additions & 0 deletions tests/json_tests.py
@@ -0,0 +1,34 @@
import unittest
import os
import sys
# Probably a better way:
sys.path.append(os.path.abspath('../scripts'))
from json_parser import fix_and_parse_json

class TestParseJson(unittest.TestCase):
def test_valid_json(self):
# Test that a valid JSON string is parsed correctly
json_str = '{"name": "John", "age": 30, "city": "New York"}'
obj = fix_and_parse_json(json_str)
self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"})

def test_invalid_json_minor(self):
# Test that an invalid JSON string can be fixed with gpt
json_str = '{"name": "John", "age": 30, "city": "New York",}'
self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), {"name": "John", "age": 30, "city": "New York"})

def test_invalid_json_major_with_gpt(self):
# Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False
json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=True), {"name": "John", "age": 30, "city": "New York"})

def test_invalid_json_major_without_gpt(self):
# Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
# Assert that this raises an exception:
with self.assertRaises(Exception):
fix_and_parse_json(json_str, try_to_fix_with_gpt=False)


if __name__ == '__main__':
unittest.main()
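The GPT-backed cases need an OpenAI key; the others run offline. One way to run the suite from inside the tests/ directory, so that the sys.path.append('../scripts') hack above resolves:

import unittest

from json_tests import TestParseJson  # the test case defined above

suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestParseJson)
unittest.TextTestRunner(verbosity=2).run(suite)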
