Commit 628e9ae
Removed OpenAI API key stuff
rhohndorf committed Apr 13, 2023
1 parent 3f25a67 commit 628e9ae
Showing 3 changed files with 2 additions and 40 deletions.
17 changes: 0 additions & 17 deletions scripts/config.py
Expand Up @@ -42,17 +42,6 @@ def __init__(self):
self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 1500))
self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 2000))

self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.use_azure = False
self.use_azure = os.getenv("USE_AZURE") == 'True'
if self.use_azure:
self.openai_api_base = os.getenv("OPENAI_AZURE_API_BASE")
self.openai_api_version = os.getenv("OPENAI_AZURE_API_VERSION")
self.openai_deployment_id = os.getenv("OPENAI_AZURE_DEPLOYMENT_ID")
openai.api_type = "azure"
openai.api_base = self.openai_api_base
openai.api_version = self.openai_api_version

self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")

self.use_mac_os_tts = False
@@ -78,8 +67,6 @@ def __init__(self):
         # Note that indexes must be created on db 0 in redis, this is not configureable.
 
         self.memory_backend = os.getenv("MEMORY_BACKEND", 'local')
-        # Initialize the OpenAI API client
-        openai.api_key = self.openai_api_key
 
     def set_continuous_mode(self, value: bool):
         """Set the continuous mode value."""
@@ -108,10 +95,6 @@ def set_smart_token_limit(self, value: int):
         """Set the smart token limit value."""
         self.smart_token_limit = value
 
-    def set_openai_api_key(self, value: str):
-        """Set the OpenAI API key value."""
-        self.openai_api_key = value
-
     def set_elevenlabs_api_key(self, value: str):
         """Set the ElevenLabs API key value."""
         self.elevenlabs_api_key = value
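Net effect on scripts/config.py: every OpenAI/Azure setting and its setter are gone. Stitched together from the context lines visible above (a sketch only; everything the diff does not show is omitted), the surviving class reads roughly:

import os

class Config:
    def __init__(self):
        self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 1500))
        self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 2000))

        # The OpenAI/Azure block removed above used to sit here.
        self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")

        self.memory_backend = os.getenv("MEMORY_BACKEND", 'local')

    def set_smart_token_limit(self, value: int):
        """Set the smart token limit value."""
        self.smart_token_limit = value

    def set_elevenlabs_api_key(self, value: str):
        """Set the ElevenLabs API key value."""
        self.elevenlabs_api_key = value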
23 changes: 1 addition & 22 deletions scripts/llm_utils.py
@@ -3,29 +3,8 @@
 from llama_cpp import Llama
 
 cfg = Config()
-llm = Llama(model_path="ggml-vicuna-13b-4bit.bin", n_ctx=2048, embedding=True)
-# openai.api_key = cfg.openai_api_key
+llm = Llama(model_path="/home/ruben/Code/llama.cpp/models/13B/ggml-vicuna-13b-4bit.bin", n_ctx=2048, embedding=True)
 
-# # Overly simple abstraction until we create something better
-# def create_chat_completion(messages, model=None, temperature=None, max_tokens=None)->str:
-#     """Create a chat completion using the OpenAI API"""
-#     if cfg.use_azure:
-#         response = openai.ChatCompletion.create(
-#             deployment_id=cfg.openai_deployment_id,
-#             model=model,
-#             messages=messages,
-#             temperature=temperature,
-#             max_tokens=max_tokens
-#         )
-#     else:
-#         response = openai.ChatCompletion.create(
-#             model=model,
-#             messages=messages,
-#             temperature=temperature,
-#             max_tokens=max_tokens
-#         )
-
-#     return response.choices[0].message["content"]
 
 def create_chat_completion(messages, model=None, temperature=0.36, max_tokens=None)->str:
     print("Message Content", messages[0]["content"])
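The new create_chat_completion is cut off after its first line, so its body is not visible in this diff. As a sketch only — the prompt flattening and sampling defaults below are assumptions, not the repository's code — a llama_cpp-backed version could look like this:

from llama_cpp import Llama

# Illustrative path; the commit itself hardcodes an absolute local path.
llm = Llama(model_path="ggml-vicuna-13b-4bit.bin", n_ctx=2048, embedding=True)

def create_chat_completion(messages, model=None, temperature=0.36, max_tokens=None) -> str:
    """Sketch of a local chat completion via llama_cpp (assumed, not the repo's actual body)."""
    # `model` is kept for signature compatibility with the removed OpenAI helper; unused here.
    # Flatten OpenAI-style chat messages into a single prompt string.
    prompt = "\n".join(f"{m['role']}: {m['content']}" for m in messages)
    # Llama.__call__ runs a plain text completion and returns an OpenAI-style dict.
    output = llm(prompt, temperature=temperature, max_tokens=max_tokens or 256)
    return output["choices"][0]["text"]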
2 changes: 1 addition & 1 deletion scripts/utils.py
@@ -2,7 +2,7 @@ def clean_input(prompt: str=''):
     try:
         return input(prompt)
     except KeyboardInterrupt:
-        print("You interrupted Auto-GPT")
+        print("You interrupted Auto-Llama")
         print("Quitting...")
         exit(0)
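clean_input exists so that a Ctrl-C during a prompt exits cleanly instead of dumping a traceback; this commit only rebrands the farewell message. A hypothetical call site (not from this commit), using the function shown above:

name = clean_input("Name your AI: ")  # Ctrl-C here prints the farewell and exits
print(f"Hello, {name}")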
