Merge branch 'main' into config
mufeedvh authored Mar 26, 2024
2 parents 0c7ce66 + 0ed2f55 commit 4e58f3f
Showing 31 changed files with 329 additions and 72 deletions. (Only the diffs that were loaded on this page appear below; the remaining files were not rendered.)
16 changes: 10 additions & 6 deletions README.md

@@ -145,12 +145,16 @@ To start using Devika, follow these steps:

 Devika requires certain configuration settings and API keys to function properly. Update the `config.toml` file with the following information:

-- `OPENAI_API_KEY`: Your OpenAI API key for accessing GPT models.
-- `CLAUDE_API_KEY`: Your Anthropic API key for accessing Claude models.
-- `BING_API_KEY`: Your Bing Search API key for web searching capabilities.
-- `DATABASE_URL`: The URL for your database connection.
-- `LOG_DIRECTORY`: The directory where Devika's logs will be stored.
-- `PROJECT_DIRECTORY`: The directory where Devika's projects will be stored.
+- `SQLITE_DB`: The path to the SQLite database file for storing Devika's data.
+- `SCREENSHOTS_DIR`: The directory where screenshots captured by Devika will be stored.
+- `PDFS_DIR`: The directory where PDF files processed by Devika will be stored.
+- `PROJECTS_DIR`: The directory where Devika's projects will be stored.
+- `LOGS_DIR`: The directory where Devika's logs will be stored.
+- `REPOS_DIR`: The directory where Git repositories cloned by Devika will be stored.
+- `BING`: Your Bing Search API key for web searching capabilities.
+- `CLAUDE`: Your Anthropic API key for accessing Claude models.
+- `NETLIFY`: Your Netlify API key for deploying and managing web projects.
+- `OPENAI`: Your OpenAI API key for accessing GPT models.

 Make sure to keep your API keys secure and do not share them publicly.

1 change: 1 addition & 0 deletions config.toml

@@ -11,6 +11,7 @@ BING = "<YOUR_BING_API_KEY>"
 CLAUDE = "<YOUR_CLAUDE_API_KEY>"
 NETLIFY = "<YOUR_NETLIFY_API_KEY>"
 OPENAI = "<YOUR_OPENAI_API_KEY>"
+GROQ = "<YOUR_GROQ_API_KEY>"

 [API_ENDPOINTS]
 BING = "https://api.bing.microsoft.com/v7.0/search"

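Each key under `[API_KEYS]` can also come from the environment: the getters in `src/config.py` (further down this diff) check an environment variable first and fall back to `config.toml`. A minimal sketch of that override, assuming the `GROQ_API_KEY` variable name used by the new getter and the Devika repo root being importable:

```python
import os

# Illustrative value only; the variable name matches get_groq_api_key below.
os.environ["GROQ_API_KEY"] = "gsk-example-key"

from src.config import Config  # assumes the Devika repo root is on sys.path

print(Config().get_groq_api_key())  # prints the env value, not the toml placeholder
```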
5 changes: 3 additions & 2 deletions devika.py

@@ -160,8 +160,9 @@ def calculate_tokens():
 @app.route("/api/token-usage", methods=["GET"])
 @route_logger(logger)
 def token_usage():
-    from src.llm import TOKEN_USAGE
-    return jsonify({"token_usage": TOKEN_USAGE})
+    project_name = request.args.get("project_name")
+    token_count = AgentState().get_latest_token_usage(project_name)
+    return jsonify({"token_usage": token_count})


 @app.route("/api/real-time-logs", methods=["GET"])

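The endpoint now reports token usage per project instead of a single process-wide counter, so callers pass the project as a query parameter. A minimal client sketch, assuming the backend is reachable at `http://127.0.0.1:1337` (host, port, and project name are illustrative):

```python
import requests

# Adjust the base URL and project name to your local Devika setup.
resp = requests.get(
    "http://127.0.0.1:1337/api/token-usage",
    params={"project_name": "demo-project"},
)
resp.raise_for_status()
print(resp.json())  # e.g. {"token_usage": 1234}
```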
3 changes: 2 additions & 1 deletion requirements.txt

@@ -20,4 +20,5 @@ keybert
 GitPython
 netlify-py
 Markdown
-xhtml2pdf
\ No newline at end of file
+xhtml2pdf
+groq

6 changes: 3 additions & 3 deletions src/agents/action/action.py

@@ -39,15 +39,15 @@ def validate_response(self, response: str):
         else:
             return response["response"], response["action"]

-    def execute(self, conversation: list) -> str:
+    def execute(self, conversation: list, project_name: str) -> str:
         prompt = self.render(conversation)
-        response = self.llm.inference(prompt)
+        response = self.llm.inference(prompt, project_name)

         valid_response = self.validate_response(response)

         while not valid_response:
             print("Invalid response from the model, trying again...")
-            return self.execute(conversation)
+            return self.execute(conversation, project_name)

         print("===" * 10)
         print(valid_response)

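The same mechanical change repeats in every agent below: `execute` gains a `project_name` parameter and forwards it to `self.llm.inference`, so each model call can be attributed to a project. The updated `src/llm/llm.py` is not shown on this page; a hedged sketch of the kind of per-project ledger the new `inference` signature implies (names and tokenizer are assumptions, not the committed code):

```python
import tiktoken  # assumed tokenizer; the real project may count tokens differently

# Hypothetical stand-in for the per-project ledger that AgentState appears to keep.
TOKEN_USAGE_BY_PROJECT: dict = {}

def record_usage(project_name: str, prompt: str, completion: str) -> int:
    """Count tokens for one inference call and attribute them to a project."""
    enc = tiktoken.get_encoding("cl100k_base")
    used = len(enc.encode(prompt)) + len(enc.encode(completion))
    TOKEN_USAGE_BY_PROJECT[project_name] = TOKEN_USAGE_BY_PROJECT.get(project_name, 0) + used
    return TOKEN_USAGE_BY_PROJECT[project_name]
```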
24 changes: 13 additions & 11 deletions src/agents/agent.py

@@ -93,7 +93,8 @@ def search_queries(self, queries: list, project_name: str) -> dict:
             Formatter Agent is invoked to format and learn from the contents
             """
             results[query] = self.formatter.execute(
-                browser.extract_text()
+                browser.extract_text(),
+                project_name
             )

             """
@@ -118,7 +119,7 @@ def update_contextual_keywords(self, sentence: str):
     Decision making Agent
     """
     def make_decision(self, prompt: str, project_name: str) -> str:
-        decision = self.decision.execute(prompt)
+        decision = self.decision.execute(prompt, project_name)

         for item in decision:
             function = item["function"]
@@ -134,7 +135,7 @@ def make_decision(self, prompt: str, project_name: str) -> str:
             elif function == "generate_pdf_document":
                 user_prompt = args["user_prompt"]
                 # Call the reporter agent to generate the PDF document
-                markdown = self.reporter.execute([user_prompt], "")
+                markdown = self.reporter.execute([user_prompt], "", project_name)
                 _out_pdf_file = PDF().markdown_to_pdf(markdown, project_name)

                 project_name_space_url = project_name.replace(" ", "%20")
@@ -154,10 +155,10 @@ def make_decision(self, prompt: str, project_name: str) -> str:
             elif function == "coding_project":
                 user_prompt = args["user_prompt"]
                 # Call the planner, researcher, coder agents in sequence
-                plan = self.planner.execute(user_prompt)
+                plan = self.planner.execute(user_prompt, project_name)
                 planner_response = self.planner.parse_response(plan)

-                research = self.researcher.execute(plan, self.collected_context_keywords)
+                research = self.researcher.execute(plan, self.collected_context_keywords, project_name)
                 search_results = self.search_queries(research["queries"], project_name)

                 code = self.coder.execute(
@@ -177,7 +178,7 @@ def subsequent_execute(self, prompt: str, project_name: str) -> str:
         conversation = ProjectManager().get_all_messages_formatted(project_name)
         code_markdown = ReadCode(project_name).code_set_to_markdown()

-        response, action = self.action.execute(conversation)
+        response, action = self.action.execute(conversation, project_name)

         ProjectManager().add_message_from_devika(project_name, response)

@@ -188,7 +189,8 @@ def subsequent_execute(self, prompt: str, project_name: str) -> str:
         if action == "answer":
             response = self.answer.execute(
                 conversation=conversation,
-                code_markdown=code_markdown
+                code_markdown=code_markdown,
+                project_name=project_name
             )
             ProjectManager().add_message_from_devika(project_name, response)
         elif action == "run":
@@ -238,7 +240,7 @@ def subsequent_execute(self, prompt: str, project_name: str) -> str:

             self.patcher.save_code_to_project(code, project_name)
         elif action == "report":
-            markdown = self.reporter.execute(conversation, code_markdown)
+            markdown = self.reporter.execute(conversation, code_markdown, project_name)

             _out_pdf_file = PDF().markdown_to_pdf(markdown, project_name)

@@ -261,7 +263,7 @@ def execute(self, prompt: str, project_name_from_user: str = None) -> str:
         if project_name_from_user:
             ProjectManager().add_message_from_user(project_name_from_user, prompt)

-        plan = self.planner.execute(prompt)
+        plan = self.planner.execute(prompt, project_name_from_user)
         print(plan)
         print("=====" * 10)

@@ -288,15 +290,15 @@ def execute(self, prompt: str, project_name_from_user: str = None) -> str:
         self.update_contextual_keywords(focus)
         print(self.collected_context_keywords)

-        internal_monologue = self.internal_monologue.execute(current_prompt=plan)
+        internal_monologue = self.internal_monologue.execute(current_prompt=plan, project_name=project_name)
         print(internal_monologue)
         print("=====" * 10)

         new_state = AgentState().new_state()
         new_state["internal_monologue"] = internal_monologue
         AgentState().add_to_current_state(project_name, new_state)

-        research = self.researcher.execute(plan, self.collected_context_keywords)
+        research = self.researcher.execute(plan, self.collected_context_keywords, project_name)
         print(research)
         print("=====" * 10)

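Taken together, these hunks thread `project_name` from the orchestrator into every sub-agent call, so no agent invokes the LLM without naming the project the tokens should be billed to.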
6 changes: 3 additions & 3 deletions src/agents/answer/answer.py

@@ -40,14 +40,14 @@ def validate_response(self, response: str):
         else:
             return response["response"]

-    def execute(self, conversation: list, code_markdown: str) -> str:
+    def execute(self, conversation: list, code_markdown: str, project_name: str) -> str:
         prompt = self.render(conversation, code_markdown)
-        response = self.llm.inference(prompt)
+        response = self.llm.inference(prompt, project_name)

         valid_response = self.validate_response(response)

         while not valid_response:
             print("Invalid response from the model, trying again...")
-            return self.execute(conversation, code_markdown)
+            return self.execute(conversation, code_markdown, project_name)

         return valid_response

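A side note on the retry idiom used by most of these agents: `while not valid_response:` is followed by an unconditional `return self.execute(...)`, so the loop body runs at most once and the retry happens by recursion; a persistently malformed model response can therefore recurse without bound. A bounded, iterative alternative (an illustration, not the committed code):

```python
def execute(self, conversation: list, code_markdown: str, project_name: str,
            max_retries: int = 3) -> str:
    for attempt in range(max_retries):
        prompt = self.render(conversation, code_markdown)
        response = self.llm.inference(prompt, project_name)
        valid_response = self.validate_response(response)
        if valid_response:
            return valid_response
        print(f"Invalid response from the model, retrying ({attempt + 1}/{max_retries})...")
    raise RuntimeError("model kept returning invalid responses")
```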
4 changes: 2 additions & 2 deletions src/agents/coder/coder.py

@@ -102,13 +102,13 @@ def execute(
         project_name: str
     ) -> str:
         prompt = self.render(step_by_step_plan, user_context, search_results)
-        response = self.llm.inference(prompt)
+        response = self.llm.inference(prompt, project_name)

         valid_response = self.validate_response(response)

         while not valid_response:
             print("Invalid response from the model, trying again...")
-            return self.execute(step_by_step_plan, user_context, search_results)
+            return self.execute(step_by_step_plan, user_context, search_results, project_name)

         print(valid_response)

6 changes: 3 additions & 3 deletions src/agents/decision/decision.py

@@ -32,14 +32,14 @@ def validate_response(self, response: str):

         return response

-    def execute(self, prompt: str) -> str:
+    def execute(self, prompt: str, project_name: str) -> str:
         prompt = self.render(prompt)
-        response = self.llm.inference(prompt)
+        response = self.llm.inference(prompt, project_name)

         valid_response = self.validate_response(response)

         while not valid_response:
             print("Invalid response from the model, trying again...")
-            return self.execute(prompt)
+            return self.execute(prompt, project_name)

         return valid_response

2 changes: 1 addition & 1 deletion src/agents/feature/feature.py

@@ -103,7 +103,7 @@ def execute(
         project_name: str
     ) -> str:
         prompt = self.render(conversation, code_markdown, system_os)
-        response = self.llm.inference(prompt)
+        response = self.llm.inference(prompt, project_name)

         valid_response = self.validate_response(response)

4 changes: 2 additions & 2 deletions src/agents/formatter/formatter.py

@@ -16,7 +16,7 @@ def render(self, raw_text: str) -> str:
     def validate_response(self, response: str) -> bool:
         return True

-    def execute(self, raw_text: str) -> str:
+    def execute(self, raw_text: str, project_name: str) -> str:
         raw_text = self.render(raw_text)
-        response = self.llm.inference(raw_text)
+        response = self.llm.inference(raw_text, project_name)
         return response

6 changes: 3 additions & 3 deletions src/agents/internal_monologue/internal_monologue.py

@@ -33,15 +33,15 @@ def validate_response(self, response: str):
         else:
             return response["internal_monologue"]

-    def execute(self, current_prompt: str) -> str:
+    def execute(self, current_prompt: str, project_name: str) -> str:
         current_prompt = self.render(current_prompt)
-        response = self.llm.inference(current_prompt)
+        response = self.llm.inference(current_prompt, project_name)

         valid_response = self.validate_response(response)

         while not valid_response:
             print("Invalid response from the model, trying again...")
-            return self.execute(current_prompt)
+            return self.execute(current_prompt, project_name)

         return valid_response

2 changes: 1 addition & 1 deletion src/agents/patcher/patcher.py

@@ -115,7 +115,7 @@ def execute(
             error,
             system_os
         )
-        response = self.llm.inference(prompt)
+        response = self.llm.inference(prompt, project_name)

         valid_response = self.validate_response(response)

4 changes: 2 additions & 2 deletions src/agents/planner/planner.py

@@ -65,7 +65,7 @@ def parse_response(self, response: str):

         return result

-    def execute(self, prompt: str) -> str:
+    def execute(self, prompt: str, project_name: str) -> str:
         prompt = self.render(prompt)
-        response = self.llm.inference(prompt)
+        response = self.llm.inference(prompt, project_name)
         return response

7 changes: 4 additions & 3 deletions src/agents/reporter/reporter.py

@@ -28,16 +28,17 @@ def validate_response(self, response: str):

     def execute(self,
         conversation: list,
-        code_markdown: str
+        code_markdown: str,
+        project_name: str
     ) -> str:
         prompt = self.render(conversation, code_markdown)
-        response = self.llm.inference(prompt)
+        response = self.llm.inference(prompt, project_name)

         valid_response = self.validate_response(response)

         while not valid_response:
             print("Invalid response from the model, trying again...")
-            return self.execute(conversation, code_markdown)
+            return self.execute(conversation, code_markdown, project_name)

         return valid_response

6 changes: 3 additions & 3 deletions src/agents/researcher/researcher.py

@@ -42,16 +42,16 @@ def validate_response(self, response: str):
             "ask_user": response["ask_user"]
         }

-    def execute(self, step_by_step_plan: str, contextual_keywords: List[str]) -> str:
+    def execute(self, step_by_step_plan: str, contextual_keywords: List[str], project_name: str) -> str:
         contextual_keywords = ", ".join(map(lambda k: k.capitalize(), contextual_keywords))
         step_by_step_plan = self.render(step_by_step_plan, contextual_keywords)

-        response = self.llm.inference(step_by_step_plan)
+        response = self.llm.inference(step_by_step_plan, project_name)

         valid_response = self.validate_response(response)

         while not valid_response:
             print("Invalid response from the model, trying again...")
-            return self.execute(step_by_step_plan, contextual_keywords)
+            return self.execute(step_by_step_plan, contextual_keywords, project_name)

         return valid_response

4 changes: 2 additions & 2 deletions src/agents/runner/runner.py

@@ -136,7 +136,7 @@ def run_code(
             error=command_output
         )

-        response = self.llm.inference(prompt)
+        response = self.llm.inference(prompt, project_name)

         valid_response = self.validate_rerunner_response(response)

@@ -233,7 +233,7 @@ def execute(
         project_name: str
     ) -> str:
         prompt = self.render(conversation, code_markdown, os_system)
-        response = self.llm.inference(prompt)
+        response = self.llm.inference(prompt, project_name)

         valid_response = self.validate_response(response)

14 changes: 13 additions & 1 deletion src/config.py

@@ -3,6 +3,15 @@


 class Config:
+
+    _instance = None
+
+    def __new__(cls):
+        if cls._instance is None:
+            cls._instance = super().__new__(cls)
+            cls._instance.config = toml.load("config.toml")
+        return cls._instance
+
     def __init__(self):
         self.config = toml.load("config.toml")

@@ -28,7 +37,10 @@ def get_openai_api_key(self):

     def get_netlify_api_key(self):
         return environ.get("NETLIFY_API_KEY", self.config["API_KEYS"]["NETLIFY"])
-
+
+    def get_groq_api_key(self):
+        return environ.get("GROQ_API_KEY", self.config["API_KEYS"]["GROQ"])
+
     def get_sqlite_db(self):
         return environ.get("SQLITE_DB_PATH", self.config["STORAGE"]["SQLITE_DB"])

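Note that `__init__` still runs on every `Config()` call and re-parses `config.toml` each time, so the `__new__` hook caches the instance but not the file read. A minimal sketch of loading the file only once (an illustration, not the committed code):

```python
import toml

class Config:
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            # Parse config.toml exactly once, at first instantiation.
            cls._instance.config = toml.load("config.toml")
        return cls._instance

    # No __init__ override: repeated Config() calls reuse the parsed dict.
```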
2 changes: 1 addition & 1 deletion src/llm/__init__.py

@@ -1 +1 @@
-from .llm import LLM, TOKEN_USAGE
+from .llm import LLM

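With the module-level `TOKEN_USAGE` export removed, nothing outside `src/llm` reads a process-wide counter anymore; consistent with the `devika.py` hunk above, totals now appear to flow through `AgentState` on a per-project basis.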
24 changes: 24 additions & 0 deletions src/llm/groq_client.py

@@ -0,0 +1,24 @@
+from groq import Groq as _Groq  # aliased so the wrapper below does not shadow the SDK client
+
+from src.config import Config
+
+class Groq:
+    def __init__(self, api_key: str = None):
+        config = Config()
+        api_key = api_key or config.get_groq_api_key()  # fall back to config.toml / env
+        self.client = _Groq(
+            api_key=api_key,
+        )
+
+    def inference(self, model_id: str, prompt: str) -> str:
+        chat_completion = self.client.chat.completions.create(
+            messages=[
+                {
+                    "role": "user",
+                    "content": prompt.strip(),
+                }
+            ],
+            model=model_id,
+        )
+
+        return chat_completion.choices[0].message.content

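A hypothetical usage of the wrapper, assuming a valid key in `config.toml` or the `GROQ_API_KEY` environment variable; the model id is only an example:

```python
from src.llm.groq_client import Groq

client = Groq()  # picks up the API key from Config
print(client.inference("mixtral-8x7b-32768", "Say hello in one word."))
```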