Skip to content

Commit

Permalink
Merge branch 'main' of github.com:chenweize1998/AgentVerse-TaskSolving into main
Browse files Browse the repository at this point in the history
  • Loading branch information
chenweize1998 committed Sep 14, 2023
2 parents bcadb7d + 4e694ef commit a162184
Show file tree
Hide file tree
Showing 16 changed files with 899 additions and 130 deletions.
1 change: 1 addition & 0 deletions agentverse/agents/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,3 +14,4 @@
from agentverse.agents.pipeline.solver import SolverAgent
from agentverse.agents.pipeline.manager import ManagerAgent
from agentverse.agents.pipeline.executor import ExecutorAgent
from agentverse.agents.pipeline.executor_fc import ExecutorAgent_fc
3 changes: 2 additions & 1 deletion agentverse/agents/pipeline/executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,14 +14,15 @@

logger = get_logger()


@agent_registry.register("executor")
class ExecutorAgent(BaseAgent):
def step(self, task_description: str, solution: str) -> ExecutorMessage:
logger.debug("", self.name, Fore.MAGENTA)
prepend_prompt, append_prompt = self.get_all_prompts(
task_description=task_description, solution=solution
)


parsed_response = None
for i in range(self.max_retry):
try:
Expand Down
129 changes: 129 additions & 0 deletions agentverse/agents/pipeline/executor_fc.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,129 @@
from __future__ import annotations

from agentverse.logging import get_logger
from colorama import Fore
import bdb
from string import Template
from typing import TYPE_CHECKING, List, Any

from agentverse.message import ExecutorMessage, Message

from agentverse.agents import agent_registry
from agentverse.agents.base import BaseAgent

import json

logger = get_logger()


# OpenAI function-calling schema passed to the LLM (the "functions" parameter
# of the Chat Completions API).  The model is forced to "call" this function,
# which in effect makes it return structured JSON arguments instead of free
# text.
#
# NOTE(review): an earlier revision used a generic gpt-4 "run_code" schema that
# executed arbitrary code on the user's machine:
#
#   function_schema = {
#       "name": "run_code",
#       "description": "Executes code on the user's machine and returns the output",
#       "parameters": {
#           "type": "object",
#           "properties": {
#               "language": {
#                   "type": "string",
#                   "description": "The programming language",
#                   "enum": ["python", "shell", "applescript", "javascript", "html"],
#               },
#               "code": {"type": "string", "description": "The code to execute"},
#           },
#           "required": ["language", "code"],
#       },
#   }
#
# Open questions from the original author (kept for context):
# - what should fill into description?
# - should a "system" role be added to the openai messages?

function_schema = {
    "name": "run_code",
    "description": (
        "The solution has been written to `tmp/main.py`. You are going to "
        "write the unit testing code for the solution."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "thought": {
                "type": "string",
                "description": "Your thought",
            },
            "file_path": {
                "type": "string",
                "description": "The path to write your testing code",
            },
            "code": {
                "type": "string",
                "description": "The testing code",
            },
            "command": {
                "type": "string",
                "description": "The command to change directory and execute your testing code",
            },
        },
        "required": ["thought", "file_path", "code", "command"],
    },
}



@agent_registry.register("executor_fc")
class ExecutorAgent_fc(BaseAgent):
    """Executor agent that drives the LLM through OpenAI function calling.

    Unlike the plain ``ExecutorAgent``, this variant sends ``function_schema``
    to the LLM so the response comes back as structured JSON arguments
    (thought / file_path / code / command) rather than free text.
    """

    def step(self, task_description: str, solution: str) -> ExecutorMessage:
        """Ask the LLM (via function calling) for testing code for *solution*.

        Args:
            task_description: Natural-language description of the task.
            solution: The proposed solution to be tested.

        Returns:
            An ExecutorMessage whose content is the parsed LLM output, or an
            empty string if no valid response was obtained after all retries.
        """
        logger.debug("", self.name, Fore.MAGENTA)
        prepend_prompt, append_prompt = self.get_all_prompts(
            task_description=task_description, solution=solution
        )

        # The configured append prompt is "<description>:\n<dict format>".
        # Only the description is sent; the dict format is unnecessary because
        # the structure is already enforced by function_schema.
        # Use maxsplit=1 so extra ":\n" occurrences in the prompt text do not
        # raise ValueError (the original bare split required exactly one).
        code_description = append_prompt.split(":\n", 1)[0]
        append_prompt = code_description.strip()

        parsed_response = None
        for _ in range(self.max_retry):
            try:
                response = self.llm.generate_response(
                    prepend_prompt, [], append_prompt, [function_schema]
                )
                parsed_response = self.output_parser.parse(response)
                break
            except (KeyboardInterrupt, bdb.BdbQuit):
                # Never swallow debugger / user interrupts.
                raise
            except Exception as e:
                logger.error(e)
                logger.warn("Retrying...")
                continue

        if parsed_response is None:
            logger.error(f"{self.name} failed to generate valid response.")
            # Bug fix: previously fell through and dereferenced None
            # (parsed_response.return_values) -> AttributeError.
            content = ""
        else:
            content = parsed_response.return_values["output"]

        return ExecutorMessage(
            sender=self.name,
            sender_agent=self,
            content=content,
        )

    async def astep(self, solution: str) -> ExecutorMessage:
        """Asynchronous version of step (not implemented)."""
        pass

    def add_message_to_memory(self, messages: List[Message]) -> None:
        """Append *messages* to this agent's memory."""
        self.memory.add_message(messages)

    def reset(self) -> None:
        """Reset the agent by clearing its memory."""
        self.memory.reset()
        # TODO: reset receiver
6 changes: 6 additions & 0 deletions agentverse/environments/executor/code_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,13 @@
import multiprocessing
from typing import TYPE_CHECKING, Any, List, Tuple

from agentverse.logging import get_logger
from agentverse.agents import ExecutorAgent
from agentverse.logging import logger

from . import BaseExecutor, executor_registry

logger = get_logger()

def execute_command(command: str, result_list) -> str:
# TODO: make it more secure
Expand All @@ -31,6 +33,9 @@ def step(
*args,
**kwargs,
) -> Any:

#import pdb;pdb.set_trace()

os.makedirs("tmp", exist_ok=True)
self.write_to_file("tmp/main.py", solution)
manager = multiprocessing.Manager()
Expand Down Expand Up @@ -60,6 +65,7 @@ def step(
result.append("Execution timed out.")
return result[0]


def write_to_file(self, file_name, file_content):
# TODO: generalize this method to a common tool
try:
Expand Down
1 change: 1 addition & 0 deletions agentverse/environments/pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -238,6 +238,7 @@ def execute(self, final_solution: str = "") -> Any:
"""execution stage.
Use the executor to finish the task.
"""

return self.executor.step(
self.agents[AGENT_TYPES.EXECUTION], self.task_description, final_solution
)
Expand Down
86 changes: 67 additions & 19 deletions agentverse/llms/openai.py
Original file line number Diff line number Diff line change
def generate_response(
    self,
    prepend_prompt: str = "",
    history: List[dict] = [],
    append_prompt: str = "",
    functions: List[dict] = [],
) -> LLMResult:
    """Send one chat-completion request and wrap the reply in an LLMResult.

    Args:
        prepend_prompt: Text placed before the conversation history.
        history: Prior chat messages (OpenAI message dicts).
        append_prompt: Text placed after the conversation history.
        functions: OpenAI function-calling schemas. When non-empty, the model
            is forced to call the first schema's function and the function
            arguments (a JSON string) become the result content.

    Returns:
        LLMResult carrying the generated content and token usage counts.

    Raises:
        OpenAIError: propagated unchanged from the OpenAI client.
    """
    messages = self.construct_messages(prepend_prompt, history, append_prompt)
    logger.log_prompt(messages)

    try:
        if functions:
            # Function-calling path. Force a call to the first supplied
            # schema instead of hard-coding "run_code", so the method also
            # works with other schemas (backward compatible: executor_fc
            # passes [function_schema] whose name is "run_code").
            response = openai.ChatCompletion.create(
                model=self.args.model,
                messages=messages,
                functions=functions,
                function_call={"name": functions[0]["name"]},
                temperature=self.args.temperature,
            )
            content = response["choices"][0]["message"]["function_call"]["arguments"]
        else:
            response = openai.ChatCompletion.create(
                messages=messages,
                **self.args.dict(),
            )
            content = response["choices"][0]["message"]["content"]
        # Single LLMResult construction for both paths (was duplicated, with
        # an unreachable copy after the bare `raise` below).
        return LLMResult(
            content=content,
            send_tokens=response["usage"]["prompt_tokens"],
            recv_tokens=response["usage"]["completion_tokens"],
            total_tokens=response["usage"]["total_tokens"],
        )
    except (OpenAIError, KeyboardInterrupt):
        # Re-raise so callers (e.g. agent retry loops) can decide what to do.
        raise



async def agenerate_response(
    self,
    prepend_prompt: str = "",
    history: List[dict] = [],
    append_prompt: str = "",
    functions: List[dict] = [],
) -> LLMResult:
    """Asynchronous version of generate_response.

    Args:
        prepend_prompt: Text placed before the conversation history.
        history: Prior chat messages (OpenAI message dicts).
        append_prompt: Text placed after the conversation history.
        functions: OpenAI function-calling schemas. When non-empty, the model
            is forced to call the first schema's function and the function
            arguments (a JSON string) become the result content.

    Returns:
        LLMResult carrying the generated content and token usage counts.

    Raises:
        OpenAIError: propagated unchanged from the OpenAI client.
    """
    messages = self.construct_messages(prepend_prompt, history, append_prompt)
    logger.log_prompt(messages)

    try:
        if functions:
            # Bug fix: the function-calling branch awaited the synchronous
            # openai.ChatCompletion.create; the async client call is acreate.
            response = await openai.ChatCompletion.acreate(
                model=self.args.model,
                messages=messages,
                functions=functions,
                function_call={"name": functions[0]["name"]},
                temperature=self.args.temperature,
            )
            content = response["choices"][0]["message"]["function_call"]["arguments"]
        else:
            response = await openai.ChatCompletion.acreate(
                messages=messages,
                **self.args.dict(),
            )
            content = response["choices"][0]["message"]["content"]
        # Single LLMResult construction for both paths (was duplicated, with
        # an unreachable copy after the bare `raise` below).
        return LLMResult(
            content=content,
            send_tokens=response["usage"]["prompt_tokens"],
            recv_tokens=response["usage"]["completion_tokens"],
            total_tokens=response["usage"]["total_tokens"],
        )
    except (OpenAIError, KeyboardInterrupt):
        # Re-raise so callers (e.g. agent retry loops) can decide what to do.
        raise


def construct_messages(
self, prepend_prompt: str, history: List[dict], append_prompt: str
Expand Down
Loading

0 comments on commit a162184

Please sign in to comment.