Fixing llm provider
Notnaton committed Jan 26, 2024
1 parent a6faa14 commit cf24151
Showing 2 changed files with 2 additions and 2 deletions.
interpreter/core/llm/llm.py (1 addition, 1 deletion)

@@ -35,7 +35,7 @@ def __init__(self, interpreter):
         self.context_window: Optional[int] = None
         self.max_tokens: Optional[int] = None
         self.api_base: Optional[str] = None
-        self.api_key: str = "key"  # Adding a place holder "key" to stop OpenAI from crashing when using local server
+        self.api_key: Optional[str] = None  # Adding a place holder "key" to stop OpenAI from crashing when using local server
         self.api_version: Optional[str] = None
         self.custom_llm_provider: Optional[str] = None
 
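For context on the api_key change above, here is a minimal, illustrative sketch of the situation the placeholder "key" worked around: OpenAI-compatible clients typically refuse to send a request without a non-empty api_key, so calls against a local server usually pass a throwaway key explicitly. The model name, api_base URL, and dummy key below are assumptions for the example, not values taken from this repository.

# Illustrative only: calling an OpenAI-compatible local server through LiteLLM.
# The api_base URL, model name, and dummy api_key are assumed example values.
import litellm

response = litellm.completion(
    model="openai/local-model",           # "openai/" prefix routes through the OpenAI-compatible path
    api_base="http://localhost:1234/v1",  # hypothetical local server
    api_key="dummy",                      # placeholder; most local servers ignore the value but require one
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)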
interpreter/terminal_interface/start_terminal_interface.py (1 addition, 1 deletion)

@@ -674,7 +674,7 @@ def start_terminal_interface(interpreter):
     # If we've set a custom api base, we want it to be sent in an openai compatible way.
     # So we need to tell LiteLLM to do this by changing the model name:
     if interpreter.llm.api_base:
-        if not interpreter.llm.model.lower().split("/")[0] in litellm.provider_list:
+        if not interpreter.llm.model.lower().split("/", 1)[0] in litellm.provider_list:
             interpreter.llm.custom_llm_provider = "openai"
 
     # If --conversations is used, run conversation_navigator
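To make the routing change above concrete, a small standalone sketch of the same check follows. The model name and api_base are made-up values; only the split / provider_list logic mirrors the diff.

# Illustrative sketch of the provider check above; model and api_base are assumed values.
import litellm

model = "my-local/model-7b"            # hypothetical model name containing a slash
api_base = "http://localhost:8080/v1"  # hypothetical custom endpoint

if api_base:
    # Everything before the first "/" is treated as a potential LiteLLM provider prefix.
    prefix = model.lower().split("/", 1)[0]
    if prefix not in litellm.provider_list:
        # Unknown prefix: fall back to treating the endpoint as OpenAI-compatible.
        custom_llm_provider = "openai"
        print(f"Routing {model!r} via custom_llm_provider={custom_llm_provider!r}")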
