Remove MAX_CHARS traffic control (All-Hands-AI#2694)
* Remove MAX_CHARS limiting

* More cleanup
li-boxuan authored Jun 29, 2024
1 parent 75f3181 commit e45b311
Showing 13 changed files with 1 addition and 53 deletions.
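For context, the traffic control being removed worked as sketched below. This is a minimal reconstruction from the deletions in this diff, not a verbatim excerpt; the names mirror the removed code (State.num_of_chars, LLMConfig.max_chars, MaxCharsExceedError), and the two helper functions are hypothetical.

    # Minimal sketch of the removed MAX_CHARS traffic control,
    # reconstructed from the deletions below.
    MAX_CHARS = 5_000_000  # default of the removed LLMConfig.max_chars field

    class MaxCharsExceedError(Exception):
        def __init__(self, num_of_chars=None, max_chars_limit=None):
            super().__init__(
                f'Number of characters {num_of_chars} exceeds '
                f'MAX_CHARS limit: {max_chars_limit}'
            )

    # Each agent step accumulated the characters sent to and received
    # from the LLM ... (hypothetical helper name)
    def record_traffic(state, prompt: str, completion: str) -> None:
        state.num_of_chars += len(prompt) + len(completion)

    # ... and the controller aborted the task once the budget was exhausted.
    def check_traffic(state, max_chars: int = MAX_CHARS) -> None:
        if state.num_of_chars > max_chars:
            raise MaxCharsExceedError(state.num_of_chars, max_chars)

Per the removed docstring, character counting was only a fallback for token counting, "which doesn't work in all cases"; after this commit the per-task limits that remain are max_iterations and max_budget_per_task.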
3 changes: 0 additions & 3 deletions agenthub/codeact_agent/codeact_agent.py
@@ -226,9 +226,6 @@ def step(self, state: State) -> Action:
             ],
             temperature=0.0,
         )
-        state.num_of_chars += sum(
-            len(message['content']) for message in messages
-        ) + len(response.choices[0].message.content)
         return self.action_parser.parse(response)
 
     def search_memory(self, query: str) -> list[str]:
3 changes: 0 additions & 3 deletions agenthub/codeact_swe_agent/codeact_swe_agent.py
@@ -181,9 +181,6 @@ def step(self, state: State) -> Action:
             ],
             temperature=0.0,
         )
-        state.num_of_chars += sum(
-            len(message['content']) for message in messages
-        ) + len(response.choices[0].message.content)
 
         return self.response_parser.parse(response)
 
1 change: 0 additions & 1 deletion agenthub/micro/agent.py
@@ -66,7 +66,6 @@ def step(self, state: State) -> Action:
         messages = [{'content': prompt, 'role': 'user'}]
         resp = self.llm.completion(messages=messages)
         action_resp = resp['choices'][0]['message']['content']
-        state.num_of_chars += len(prompt) + len(action_resp)
         action = parse_response(action_resp)
         return action
 
5 changes: 0 additions & 5 deletions agenthub/monologue_agent/agent.py
@@ -183,11 +183,6 @@ def step(self, state: State) -> Action:
         # format all as a single message, a monologue
         resp = self.llm.completion(messages=messages)
 
-        # keep track of max_chars fallback option
-        state.num_of_chars += len(prompt) + len(
-            resp['choices'][0]['message']['content']
-        )
-
         action = self.response_parser.parse(resp)
         self.latest_action = action
         return action
3 changes: 0 additions & 3 deletions agenthub/planner_agent/agent.py
@@ -48,9 +48,6 @@ def step(self, state: State) -> Action:
         prompt = get_prompt(state)
         messages = [{'content': prompt, 'role': 'user'}]
         resp = self.llm.completion(messages=messages)
-        state.num_of_chars += len(prompt) + len(
-            resp['choices'][0]['message']['content']
-        )
         return self.response_parser.parse(resp)
 
     def search_memory(self, query: str) -> list[str]:
10 changes: 0 additions & 10 deletions opendevin/controller/agent_controller.py
@@ -9,7 +9,6 @@
     LLMMalformedActionError,
     LLMNoActionError,
     LLMResponseError,
-    MaxCharsExceedError,
 )
 from opendevin.core.logger import opendevin_logger as logger
 from opendevin.core.schema import AgentState
@@ -37,7 +36,6 @@
 )
 
 MAX_ITERATIONS = config.max_iterations
-MAX_CHARS = config.llm.max_chars
 MAX_BUDGET_PER_TASK = config.max_budget_per_task
 
 
@@ -58,7 +56,6 @@ def __init__(
         event_stream: EventStream,
         sid: str = 'default',
         max_iterations: int = MAX_ITERATIONS,
-        max_chars: int = MAX_CHARS,
         max_budget_per_task: float | None = MAX_BUDGET_PER_TASK,
         initial_state: State | None = None,
         is_delegate: bool = False,
@@ -70,15 +67,13 @@ def __init__(
             event_stream: The event stream to publish events to.
             sid: The session ID of the agent.
             max_iterations: The maximum number of iterations the agent can run.
-            max_chars: The maximum number of characters the agent can output.
             max_budget_per_task: The maximum budget (in USD) allowed per task, beyond which the agent will stop.
             initial_state: The initial state of the controller.
            is_delegate: Whether this controller is a delegate.
        """
        self._step_lock = asyncio.Lock()
        self.id = sid
        self.agent = agent
-        self.max_chars = max_chars
        if initial_state is None:
            self.state = State(inputs={}, max_iterations=max_iterations)
        else:
@@ -224,7 +219,6 @@ async def start_delegate(self, action: AgentDelegateAction):
             inputs=action.inputs or {},
             iteration=0,
             max_iterations=self.state.max_iterations,
-            num_of_chars=self.state.num_of_chars,
             delegate_level=self.state.delegate_level + 1,
             # metrics should be shared between parent and child
             metrics=self.state.metrics,
@@ -235,7 +229,6 @@ async def start_delegate(self, action: AgentDelegateAction):
             agent=agent,
             event_stream=self.event_stream,
             max_iterations=self.state.max_iterations,
-            max_chars=self.max_chars,
             max_budget_per_task=self.max_budget_per_task,
             initial_state=state,
             is_delegate=True,
@@ -298,9 +291,6 @@ async def _step(self):
             await self.event_stream.add_event(obs, EventSource.AGENT)
             return
 
-        if self.state.num_of_chars > self.max_chars:
-            raise MaxCharsExceedError(self.state.num_of_chars, self.max_chars)
-
         logger.info(
             f'{self.agent.name} LEVEL {self.state.delegate_level} STEP {self.state.iteration}',
             extra={'msg_type': 'STEP'},
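With these deletions, AgentController no longer accepts a max_chars keyword and no longer checks a character budget before each step. A sketch of constructing the controller after this change, mirroring the updated call site in opendevin/core/main.py further down:

    # Sketch: constructing AgentController after this commit. The remaining
    # per-task caps are iterations and budget; the max_chars kwarg is gone.
    controller = AgentController(
        agent=agent,
        max_iterations=args.max_iterations,
        max_budget_per_task=args.max_budget_per_task,
        event_stream=event_stream,
    )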
2 changes: 0 additions & 2 deletions opendevin/controller/state/state.py
@@ -29,8 +29,6 @@ class State:
     root_task: RootTask = field(default_factory=RootTask)
     iteration: int = 0
     max_iterations: int = 100
-    # number of characters we have sent to and received from LLM so far for current task
-    num_of_chars: int = 0
     background_commands_obs: list[CmdOutputObservation] = field(default_factory=list)
     history: list[tuple[Action, Observation]] = field(default_factory=list)
     updated_info: list[tuple[Action, Observation]] = field(default_factory=list)
9 changes: 0 additions & 9 deletions opendevin/core/config.py
@@ -39,7 +39,6 @@ class LLMConfig(metaclass=Singleton):
         retry_min_wait: The minimum time to wait between retries, in seconds. This is exponential backoff minimum. For models with very low limits, this can be set to 15-20.
         retry_max_wait: The maximum time to wait between retries, in seconds. This is exponential backoff maximum.
         timeout: The timeout for the API.
-        max_chars: The maximum number of characters to send to and receive from the API. This is a fallback for token counting, which doesn't work in all cases.
         temperature: The temperature for the API.
         top_p: The top p for the API.
         custom_llm_provider: The custom LLM provider to use. This is undocumented in opendevin, and normally not used. It is documented on the litellm side.
@@ -63,7 +62,6 @@ class LLMConfig(metaclass=Singleton):
     retry_min_wait: int = 3
     retry_max_wait: int = 60
     timeout: int | None = None
-    max_chars: int = 5_000_000  # fallback for token counting
     temperature: float = 0
     top_p: float = 0.5
     custom_llm_provider: str | None = None
@@ -523,13 +521,6 @@ def get_parser():
         type=float,
         help='The maximum budget allowed per task, beyond which the agent will stop.',
     )
-    parser.add_argument(
-        '-n',
-        '--max-chars',
-        default=config.llm.max_chars,
-        type=int,
-        help='The maximum number of characters to send to and receive from LLM per task',
-    )
     # --eval configs are for evaluations only
     parser.add_argument(
         '--eval-output-dir',
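With the max_chars field and the -n/--max-chars flag removed, the parser's remaining per-task limits are -i/--max-iterations and -b/--max-budget-per-task. A hypothetical use of the updated parser (flag names taken from the help text asserted in tests/unit/test_arg_parser.py below):

    # Hypothetical invocation; get_parser() is the function modified above.
    parser = get_parser()
    args = parser.parse_args(['-i', '50', '-b', '2.5'])
    # args.max_chars no longer exists; passing '-n' now fails with an
    # argparse "unrecognized arguments" error.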
9 changes: 0 additions & 9 deletions opendevin/core/exceptions.py
@@ -1,12 +1,3 @@
-class MaxCharsExceedError(Exception):
-    def __init__(self, num_of_chars=None, max_chars_limit=None):
-        if num_of_chars is not None and max_chars_limit is not None:
-            message = f'Number of characters {num_of_chars} exceeds MAX_CHARS limit: {max_chars_limit}'
-        else:
-            message = 'Number of characters exceeds MAX_CHARS limit'
-        super().__init__(message)
-
-
 class AgentNoInstructionError(Exception):
     def __init__(self, message='Instruction must be provided'):
         super().__init__(message)
1 change: 0 additions & 1 deletion opendevin/core/main.py
@@ -89,7 +89,6 @@ async def main(
         agent=agent,
         max_iterations=args.max_iterations,
         max_budget_per_task=args.max_budget_per_task,
-        max_chars=args.max_chars,
         event_stream=event_stream,
     )
     runtime = ServerRuntime(event_stream=event_stream, sandbox=sandbox)
1 change: 0 additions & 1 deletion opendevin/core/schema/config.py
@@ -32,7 +32,6 @@ class ConfigType(str, Enum):
     AGENT_MEMORY_MAX_THREADS = 'AGENT_MEMORY_MAX_THREADS'
     AGENT_MEMORY_ENABLED = 'AGENT_MEMORY_ENABLED'
     MAX_ITERATIONS = 'MAX_ITERATIONS'
-    MAX_CHARS = 'MAX_CHARS'
     AGENT = 'AGENT'
     E2B_API_KEY = 'E2B_API_KEY'
     SANDBOX_TYPE = 'SANDBOX_TYPE'
2 changes: 0 additions & 2 deletions opendevin/server/session/agent.py
@@ -91,7 +91,6 @@ async def _create_controller(self, start_event: dict):
         api_key = args.get(ConfigType.LLM_API_KEY, config.llm.api_key)
         api_base = config.llm.base_url
         max_iterations = args.get(ConfigType.MAX_ITERATIONS, config.max_iterations)
-        max_chars = args.get(ConfigType.MAX_CHARS, config.llm.max_chars)
 
         logger.info(f'Creating agent {agent_cls} using LLM {model}')
         llm = LLM(model=model, api_key=api_key, base_url=api_base)
@@ -109,7 +108,6 @@ async def _create_controller(self, start_event: dict):
             event_stream=self.event_stream,
             agent=agent,
             max_iterations=int(max_iterations),
-            max_chars=int(max_chars),
         )
         try:
             agent_state = State.restore_from_session(self.sid)
5 changes: 1 addition & 4 deletions tests/unit/test_arg_parser.py
@@ -11,7 +11,7 @@ def test_help_message(capsys):
     expected_help_message = """
 usage: pytest [-h] [-d DIRECTORY] [-t TASK] [-f FILE] [-c AGENT_CLS]
               [-m MODEL_NAME] [-i MAX_ITERATIONS] [-b MAX_BUDGET_PER_TASK]
-              [-n MAX_CHARS] [--eval-output-dir EVAL_OUTPUT_DIR]
+              [--eval-output-dir EVAL_OUTPUT_DIR]
               [--eval-n-limit EVAL_N_LIMIT]
               [--eval-num-workers EVAL_NUM_WORKERS] [--eval-note EVAL_NOTE]
               [-l LLM_CONFIG]
@@ -34,9 +34,6 @@ def test_help_message(capsys):
   -b MAX_BUDGET_PER_TASK, --max-budget-per-task MAX_BUDGET_PER_TASK
                         The maximum budget allowed per task, beyond which the
                         agent will stop.
-  -n MAX_CHARS, --max-chars MAX_CHARS
-                        The maximum number of characters to send to and
-                        receive from LLM per task
   --eval-output-dir EVAL_OUTPUT_DIR
                         The directory to save evaluation output
   --eval-n-limit EVAL_N_LIMIT
