Merge branch 'main' into patch-1
varshney-yash authored Mar 27, 2024
2 parents ac22917 + a52bd73 commit 523cbcf
Showing 37 changed files with 576 additions and 107 deletions.
33 changes: 33 additions & 0 deletions Makefile
@@ -0,0 +1,33 @@

.PHONY = setup deps compose-up compose-down compose-destroy

# to check if docker is installed on the machine
DOCKER := $(shell command -v docker)
DOCKER_COMPOSE := $(shell command -v docker-compose)
deps:
ifndef DOCKER
	@echo "Docker is not available. Please install docker"
	@echo "try running sudo apt-get install docker"
	@exit 1
endif
ifndef DOCKER_COMPOSE
	@echo "docker-compose is not available. Please install docker-compose"
	@echo "try running sudo apt-get install docker-compose"
	@exit 1
endif

setup:
	sh +x build

compose-down: deps
	docker volume ls
	docker-compose ps
	docker images
	docker-compose down;

compose-up: deps compose-down
	docker-compose up --build

compose-destroy: deps
	docker images | grep -i devika | awk '{print $$3}' | xargs docker rmi -f
	docker volume prune
18 changes: 11 additions & 7 deletions README.md
@@ -145,12 +145,16 @@ To start using Devika, follow these steps:

Devika requires certain configuration settings and API keys to function properly. Update the `config.toml` file with the following information:

-- `OPENAI_API_KEY`: Your OpenAI API key for accessing GPT models.
-- `CLAUDE_API_KEY`: Your Anthropic API key for accessing Claude models.
-- `BING_API_KEY`: Your Bing Search API key for web searching capabilities.
-- `DATABASE_URL`: The URL for your database connection.
-- `LOG_DIRECTORY`: The directory where Devika's logs will be stored.
-- `PROJECT_DIRECTORY`: The directory where Devika's projects will be stored.
+- `SQLITE_DB`: The path to the SQLite database file for storing Devika's data.
+- `SCREENSHOTS_DIR`: The directory where screenshots captured by Devika will be stored.
+- `PDFS_DIR`: The directory where PDF files processed by Devika will be stored.
+- `PROJECTS_DIR`: The directory where Devika's projects will be stored.
+- `LOGS_DIR`: The directory where Devika's logs will be stored.
+- `REPOS_DIR`: The directory where Git repositories cloned by Devika will be stored.
+- `BING`: Your Bing Search API key for web searching capabilities.
+- `CLAUDE`: Your Anthropic API key for accessing Claude models.
+- `NETLIFY`: Your Netlify API key for deploying and managing web projects.
+- `OPENAI`: Your OpenAI API key for accessing GPT models.

Make sure to keep your API keys secure and do not share them publicly.
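For orientation, here is a rough, hypothetical sketch of how these settings might be read and sanity-checked in Python. The grouping of the directory keys under `[STORAGE]` and the API keys under `[API_KEYS]` is an assumption (only the API-key block, `[API_ENDPOINTS]`, and `[LOGGING]` appear in the `config.toml` diff below), and the helper itself is not part of this commit.

```python
# Hypothetical helper (not part of this commit): load config.toml with the
# standard-library TOML parser and report documented keys that are still unset.
# The [STORAGE] and [API_KEYS] section names are assumptions.
import tomllib  # Python 3.11+

REQUIRED_KEYS = {
    "STORAGE": ["SQLITE_DB", "SCREENSHOTS_DIR", "PDFS_DIR",
                "PROJECTS_DIR", "LOGS_DIR", "REPOS_DIR"],
    "API_KEYS": ["BING", "CLAUDE", "NETLIFY", "OPENAI"],
}

with open("config.toml", "rb") as f:
    config = tomllib.load(f)

for section, keys in REQUIRED_KEYS.items():
    table = config.get(section, {})
    missing = [key for key in keys if not table.get(key)]
    if missing:
        print(f"[{section}] missing or empty: {', '.join(missing)}")
```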

@@ -217,7 +221,7 @@ To join the Devika community Discord server, [click here](https://discord.com/in

## Contributing

-We welcome contributions to enhance Devika's capabilities and improve its performance. To contribute, please see the `CONTRIBUTING.md` file for steps.
+We welcome contributions to enhance Devika's capabilities and improve its performance. To contribute, please see the [`CONTRIBUTING.md`](CONTRIBUTING.md) file for steps.

## License

29 changes: 29 additions & 0 deletions app.dockerfile
@@ -0,0 +1,29 @@
FROM debian:12

# setting up build variable
ARG VITE_API_BASE_URL
ENV VITE_API_BASE_URL=${VITE_API_BASE_URL}

# setting up os env
USER root
WORKDIR /home/nonroot/client
RUN groupadd -r nonroot && useradd -r -g nonroot -d /home/nonroot/client -s /bin/bash nonroot

# install node js
RUN apt-get update && apt-get upgrade
RUN apt-get install -y build-essential software-properties-common curl sudo wget git
RUN curl -fsSL https://deb.nodesource.com/setup_20.x | sudo -E bash -
RUN apt-get install nodejs

# copying devika app client only
COPY ui /home/nonroot/client/ui
COPY src /home/nonroot/client/src
COPY config.toml /home/nonroot/client/

RUN cd ui && npm install && npm install -g npm && npm install -g bun
RUN chown -R nonroot:nonroot /home/nonroot/client

USER nonroot
WORKDIR /home/nonroot/client/ui

ENTRYPOINT [ "npx", "bun", "run", "dev", "--", "--host" ]
6 changes: 6 additions & 0 deletions config.toml
@@ -11,6 +11,12 @@ BING = "<YOUR_BING_API_KEY>"
CLAUDE = "<YOUR_CLAUDE_API_KEY>"
NETLIFY = "<YOUR_NETLIFY_API_KEY>"
OPENAI = "<YOUR_OPENAI_API_KEY>"
+GROQ = "<YOUR_GROQ_API_KEY>"
+
+[API_ENDPOINTS]
+BING = "https://api.bing.microsoft.com/v7.0/search"
+OLLAMA = "http://127.0.0.1:11434"
+
[LOGGING]
LOG_REST_API = "true"
LOG_PROMPTS = "false"
37 changes: 37 additions & 0 deletions devika.dockerfile
@@ -0,0 +1,37 @@
FROM debian:12

# setting up os env
USER root
WORKDIR /home/nonroot/devika
RUN groupadd -r nonroot && useradd -r -g nonroot -d /home/nonroot/devika -s /bin/bash nonroot

ENV PYTHONUNBUFFERED 1
ENV PYTHONDONTWRITEBYTECODE 1

# setting up python3
RUN apt-get update && apt-get upgrade
RUN apt-get install -y build-essential software-properties-common curl sudo wget git
RUN apt-get install -y python3 python3-pip
RUN curl -fsSL https://astral.sh/uv/install.sh | sudo -E bash -
RUN $HOME/.cargo/bin/uv venv
ENV PATH="/home/nonroot/devika/.venv/bin:$HOME/.cargo/bin:$PATH"
RUN echo $PATH

# copy devika python engine only
RUN $HOME/.cargo/bin/uv venv
COPY requirements.txt /home/nonroot/devika/
RUN UV_HTTP_TIMEOUT=100000 $HOME/.cargo/bin/uv pip install -r requirements.txt
RUN playwright install --with-deps

COPY src /home/nonroot/devika/src
COPY config.toml /home/nonroot/devika/
COPY devika.py /home/nonroot/devika/
RUN chown -R nonroot:nonroot /home/nonroot/devika
RUN ls -al

USER nonroot
WORKDIR /home/nonroot/devika
ENV PATH="/home/nonroot/devika/.venv/bin:$HOME/.cargo/bin:$PATH"
RUN mkdir /home/nonroot/devika/db

ENTRYPOINT [ "python3", "-m", "devika" ]
5 changes: 3 additions & 2 deletions devika.py
@@ -160,8 +160,9 @@ def calculate_tokens():
@app.route("/api/token-usage", methods=["GET"])
@route_logger(logger)
def token_usage():
from src.llm import TOKEN_USAGE
return jsonify({"token_usage": TOKEN_USAGE})
project_name = request.args.get("project_name")
token_count = AgentState().get_latest_token_usage(project_name)
return jsonify({"token_usage": token_count})


@app.route("/api/real-time-logs", methods=["GET"])
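The hunk above reworks `/api/token-usage` to take a `project_name` query parameter and return the per-project count from `AgentState` instead of a global counter. A minimal, hypothetical client call might look like the following; the port comes from the compose file below, and the project name is a placeholder.

```python
# Hypothetical client call (not part of this commit). Assumes the backend is
# reachable on port 1337, as in docker-compose.yaml, and that a project named
# "my-project" already exists.
import requests

response = requests.get(
    "http://127.0.0.1:1337/api/token-usage",
    params={"project_name": "my-project"},
    timeout=10,
)
print(response.json())  # expected shape: {"token_usage": <int>}
```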
61 changes: 61 additions & 0 deletions docker-compose.yaml
@@ -0,0 +1,61 @@
version: "3.9"

services:
ollama-service:
image: ollama/ollama:latest
expose:
- 11434
ports:
- 11434:11434
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:11434/ || exit 1"]
interval: 5s
timeout: 30s
retries: 5
start_period: 30s
networks:
- devika-subnetwork

devika-backend-engine:
build:
context: .
dockerfile: devika.dockerfile
depends_on:
- ollama-service
expose:
- 1337
ports:
- 1337:1337
environment:
- OLLAMA_HOST=http://ollama-service:11434
healthcheck:
test: ["CMD-SHELL", "curl -f http://localhost:1337/ || exit 1"]
interval: 5s
timeout: 30s
retries: 5
start_period: 30s
volumes:
- devika-backend-dbstore:/home/nonroot/devika/db
networks:
- devika-subnetwork

devika-frontend-app:
build:
context: .
dockerfile: app.dockerfile
args:
- VITE_API_BASE_URL=http://127.0.0.1:1337
depends_on:
- devika-backend-engine
expose:
- 3000
ports:
- 3000:3000
networks:
- devika-subnetwork

networks:
devika-subnetwork:

volumes:
devika-backend-dbstore:
3 changes: 2 additions & 1 deletion requirements.txt
@@ -20,4 +20,5 @@ keybert
GitPython
netlify-py
Markdown
-xhtml2pdf
+xhtml2pdf
+groq
6 changes: 3 additions & 3 deletions src/agents/action/action.py
@@ -39,15 +39,15 @@ def validate_response(self, response: str):
        else:
            return response["response"], response["action"]

-    def execute(self, conversation: list) -> str:
+    def execute(self, conversation: list, project_name: str) -> str:
        prompt = self.render(conversation)
-        response = self.llm.inference(prompt)
+        response = self.llm.inference(prompt, project_name)

        valid_response = self.validate_response(response)

        while not valid_response:
            print("Invalid response from the model, trying again...")
-            return self.execute(conversation)
+            return self.execute(conversation, project_name)

        print("===" * 10)
        print(valid_response)
24 changes: 13 additions & 11 deletions src/agents/agent.py
@@ -93,7 +93,8 @@ def search_queries(self, queries: list, project_name: str) -> dict:
            Formatter Agent is invoked to format and learn from the contents
            """
            results[query] = self.formatter.execute(
-                browser.extract_text()
+                browser.extract_text(),
+                project_name
            )

        """
@@ -118,7 +119,7 @@ def update_contextual_keywords(self, sentence: str):
    Decision making Agent
    """
    def make_decision(self, prompt: str, project_name: str) -> str:
-        decision = self.decision.execute(prompt)
+        decision = self.decision.execute(prompt, project_name)

        for item in decision:
            function = item["function"]
@@ -134,7 +135,7 @@ def make_decision(self, prompt: str, project_name: str) -> str:
elif function == "generate_pdf_document":
user_prompt = args["user_prompt"]
# Call the reporter agent to generate the PDF document
markdown = self.reporter.execute([user_prompt], "")
markdown = self.reporter.execute([user_prompt], "", project_name)
_out_pdf_file = PDF().markdown_to_pdf(markdown, project_name)

project_name_space_url = project_name.replace(" ", "%20")
@@ -154,10 +155,10 @@ def make_decision(self, prompt: str, project_name: str) -> str:
elif function == "coding_project":
user_prompt = args["user_prompt"]
# Call the planner, researcher, coder agents in sequence
plan = self.planner.execute(user_prompt)
plan = self.planner.execute(user_prompt, project_name)
planner_response = self.planner.parse_response(plan)

research = self.researcher.execute(plan, self.collected_context_keywords)
research = self.researcher.execute(plan, self.collected_context_keywords, project_name)
search_results = self.search_queries(research["queries"], project_name)

code = self.coder.execute(
@@ -177,7 +178,7 @@ def subsequent_execute(self, prompt: str, project_name: str) -> str:
        conversation = ProjectManager().get_all_messages_formatted(project_name)
        code_markdown = ReadCode(project_name).code_set_to_markdown()

-        response, action = self.action.execute(conversation)
+        response, action = self.action.execute(conversation, project_name)

        ProjectManager().add_message_from_devika(project_name, response)

@@ -188,7 +189,8 @@ def subsequent_execute(self, prompt: str, project_name: str) -> str:
if action == "answer":
response = self.answer.execute(
conversation=conversation,
code_markdown=code_markdown
code_markdown=code_markdown,
project_name=project_name
)
ProjectManager().add_message_from_devika(project_name, response)
elif action == "run":
@@ -238,7 +240,7 @@ def subsequent_execute(self, prompt: str, project_name: str) -> str:

            self.patcher.save_code_to_project(code, project_name)
        elif action == "report":
-            markdown = self.reporter.execute(conversation, code_markdown)
+            markdown = self.reporter.execute(conversation, code_markdown, project_name)

            _out_pdf_file = PDF().markdown_to_pdf(markdown, project_name)

@@ -261,7 +263,7 @@ def execute(self, prompt: str, project_name_from_user: str = None) -> str:
        if project_name_from_user:
            ProjectManager().add_message_from_user(project_name_from_user, prompt)

-        plan = self.planner.execute(prompt)
+        plan = self.planner.execute(prompt, project_name_from_user)
        print(plan)
        print("=====" * 10)

@@ -288,15 +290,15 @@ def execute(self, prompt: str, project_name_from_user: str = None) -> str:
            self.update_contextual_keywords(focus)
        print(self.collected_context_keywords)

-        internal_monologue = self.internal_monologue.execute(current_prompt=plan)
+        internal_monologue = self.internal_monologue.execute(current_prompt=plan, project_name=project_name)
        print(internal_monologue)
        print("=====" * 10)

        new_state = AgentState().new_state()
        new_state["internal_monologue"] = internal_monologue
        AgentState().add_to_current_state(project_name, new_state)

-        research = self.researcher.execute(plan, self.collected_context_keywords)
+        research = self.researcher.execute(plan, self.collected_context_keywords, project_name)
        print(research)
        print("=====" * 10)

6 changes: 3 additions & 3 deletions src/agents/answer/answer.py
@@ -40,14 +40,14 @@ def validate_response(self, response: str):
        else:
            return response["response"]

-    def execute(self, conversation: list, code_markdown: str) -> str:
+    def execute(self, conversation: list, code_markdown: str, project_name: str) -> str:
        prompt = self.render(conversation, code_markdown)
-        response = self.llm.inference(prompt)
+        response = self.llm.inference(prompt, project_name)

        valid_response = self.validate_response(response)

        while not valid_response:
            print("Invalid response from the model, trying again...")
-            return self.execute(conversation, code_markdown)
+            return self.execute(conversation, code_markdown, project_name)

        return valid_response
4 changes: 2 additions & 2 deletions src/agents/coder/coder.py
@@ -102,13 +102,13 @@ def execute(
        project_name: str
    ) -> str:
        prompt = self.render(step_by_step_plan, user_context, search_results)
-        response = self.llm.inference(prompt)
+        response = self.llm.inference(prompt, project_name)

        valid_response = self.validate_response(response)

        while not valid_response:
            print("Invalid response from the model, trying again...")
-            return self.execute(step_by_step_plan, user_context, search_results)
+            return self.execute(step_by_step_plan, user_context, search_results, project_name)

        print(valid_response)

(Diffs for the remaining changed files are not shown here.)
