Skip to content

Commit

Permalink
Update drive_tools.py and openai_model_tools.py
Browse files Browse the repository at this point in the history
  • Loading branch information
ExplorerGT92 committed Jan 6, 2024
1 parent feccaba commit 831c26c
Show file tree
Hide file tree
Showing 3 changed files with 105 additions and 90 deletions.
54 changes: 13 additions & 41 deletions app.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@
ask_chat_gpt_4_0314_asynchronous,
ask_chat_gpt_4_0613_synchronous,
ask_chat_gpt_4_0613_asynchronous,
ask_gpt_4_vision,
)
from utils.openai_dalle_tools import generate_an_image_with_dalle3
from utils.core_tools import get_current_date_time, display_help
Expand Down Expand Up @@ -382,19 +383,13 @@ async def main():
"ask_chat_gpt_4_0613_synchronous": ask_chat_gpt_4_0613_synchronous,
"ask_chat_gpt_4_0613_asynchronous": ask_chat_gpt_4_0613_asynchronous,
"generate_an_image_with_dalle3": generate_an_image_with_dalle3,
"ask_gpt_4_vision": ask_gpt_4_vision,
# Add more core functions here
}
logging.info('Initialized available functions line 385')

# Define the available core tools
# Define core tools here
tools = [
{
"type": "function",
"function": {
"name": "get_current_date_time",
"description": "Get the current date and time from the local machine.",
},
},
{
"type": "function",
"function": {
Expand Down Expand Up @@ -498,42 +493,21 @@ async def main():
{
"type": "function",
"function": {
"name": "generate_an_image_with_dalle3",
"description": "This function allows you to generate an image with DALL-E 3.",
"name": "ask_gpt_4_vision",
"description": "Ask GPT-4 Vision a question about a specific image file located in the 'uploads' folder.",
"parameters": {
"type": "object",
"properties": {
"prompt": {
"type": "string",
"description": "The prompt to use for image generation. 4000 characters max.",
},
"n": {
"type": "integer",
"description": "The number of images to generate. 10 max.",
},
"size": {
"type": "string",
"description": "The size of the image to generate. 1024x1024, 1792x1024, or 1024x1792.",
},
"quality": {
"type": "string",
"description": "The quality of the image to generate. standard or hd.",
},
"style": {
"type": "string",
"description": "The style of the image to generate. natural or vivid.",
},
"response_format": {
"image_name": {
"type": "string",
"description": "The format of the response. b64_json or url.",
"description": "The name of the image file in the 'uploads' folder.",
},
},
"required": ["prompt"],
"required": ["image_name"],
},
},
},
]
logging.info('Defined available core tools line 532')

# Use the load_plugins_and_get_tools function to conditionally add tools
available_functions, tools = await enable_plugins(
Expand All @@ -544,15 +518,14 @@ async def main():

# Initialize the conversation memory
memory = []
logging.info('Initialized conversation memory line 543')

# Main Loop
while True:
# Ask the user for input
user_input = Prompt.ask(
"\nHow can I be of assistance? ([yellow]/tools[/yellow] or [bold yellow]quit[/bold yellow])",
)
logging.info('Line 551 received user input: %s', user_input)
logging.info('Line 555 received user input: %s', user_input)

# Check if the user wants to exit the program
if user_input.lower() == "quit":
Expand All @@ -570,19 +543,18 @@ async def main():
messages = [
{
"role": "system",
"content": "You are an AI Assistant integrated within a Python-based application designed to assist users by leveraging a suite of tools and functions, both synchronous and asynchronous, to process user requests and manage dynamic workflows. Your capabilities include interacting with a larger AI language model (LLM) for synchronous and asynchronous assistance, accessing the current date and time, and utilizing enabled plugins for additional functionalities. You are expected to maintain a conversation memory, ensuring the context remains within the token limit for efficient processing. When responding to user requests, consider the available tools and their descriptions, dynamically structuring workflows to include multiple turns where necessary. Prioritize reasoning and delivering the best possible response based on the users original request, taking into account the data gathered and actions completed during the interaction. Ensure that your responses are clear, concise, and directly address the users needs, while also being prepared to handle errors or unexpected situations gracefully.",
"content": f"{MAIN_SYSTEM_PROMPT}",
},
{
"role": "assistant",
"content": "Understood. As we continue, feel free to direct any requests or tasks you'd like assistance with. Whether it's querying information, managing schedules, processing data, or utilizing any of the tools and functionalities I have available, I'm here to help. Just let me know what you need, and I'll do my best to assist you effectively and efficiently.",
},
{"role": "user", "content": f"{user_input}"},
]
logging.info('Line 542 prepared conversation messages')
logging.info('Line 581 prepared conversation messages')

# Start the spinner
with live_spinner:
# Start the spinner

live_spinner.start()

# Pass the user input and memory to the run_conversation function
Expand Down Expand Up @@ -620,7 +592,7 @@ async def main():

# Remove tools from the tools list after processing
tools[:] = [tool for tool in tools if not tool.get("function", {}).get("name", "").lower() in user_input.lower()]
logging.info('Removed used tools from the tools list line 619')
logging.info('Removed used tools from the tools list line 622')


# Run the main function
Expand Down
17 changes: 0 additions & 17 deletions plugins/_gmail_plugin/drive_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -243,23 +243,6 @@ async def list_files(drive_service, folder_name='root', max_results=10):
}
},
},
{
"type": "function",
"function": {
"name": "upload_file",
"description": "Upload or update a file to Google Drive based on user input.",
"parameters": {
"type": "object",
"properties": {
"user_input": {
"type": "string",
"description": "The user input containing file name, content, and other metadata.",
}
},
"required": ["user_input"],
},
},
},
{
"type": "function",
"function": {
Expand Down
124 changes: 92 additions & 32 deletions utils/openai_model_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,16 @@
This file contains the core tools for the AI Assistant.
"""

import requests
import base64
from openai import OpenAI, AsyncOpenAI
from rich.console import Console
from pathlib import Path
from plugins._gmail_plugin.drive_tools import (
available_functions,
)
from
from config import (
live_spinner,
OPENAI_API_KEY,
OPENAI_ORG_ID,
)
Expand Down Expand Up @@ -93,16 +98,15 @@ async def ask_chat_gpt_4_0314_asynchronous(**kwargs) -> str:
{"role": "assistant", "content": text},
]

with live_spinner:
response = await gpt4_client_async.chat.completions.create(
model="gpt-4-0314",
messages=messages,
temperature=0.2,
max_tokens=2048,
top_p=0.5,
frequency_penalty=0,
presence_penalty=0,
)
response = await gpt4_client_async.chat.completions.create(
model="gpt-4-0314",
messages=messages,
temperature=0.2,
max_tokens=2048,
top_p=0.5,
frequency_penalty=0,
presence_penalty=0,
)

if (
response.choices
Expand Down Expand Up @@ -136,16 +140,15 @@ def ask_chat_gpt_4_0613_synchronous(**kwargs) -> str:
{"role": "assistant", "content": text},
]

with live_spinner:
response = gpt4_client.chat.completions.create(
model="gpt-4-613",
messages=messages,
temperature=0.2,
max_tokens=2048,
top_p=0.5,
frequency_penalty=0,
presence_penalty=0,
)
response = gpt4_client.chat.completions.create(
model="gpt-4-613",
messages=messages,
temperature=0.2,
max_tokens=2048,
top_p=0.5,
frequency_penalty=0,
presence_penalty=0,
)

# Check if the response has the expected structure and content
if (
Expand Down Expand Up @@ -181,16 +184,15 @@ async def ask_chat_gpt_4_0613_asynchronous(**kwargs) -> str:
{"role": "assistant", "content": text},
]

with live_spinner:
response = await gpt4_client_async.chat.completions.create(
model="gpt-4-0613",
messages=messages,
temperature=0.2,
max_tokens=2048,
top_p=0.5,
frequency_penalty=0,
presence_penalty=0,
)
response = await gpt4_client_async.chat.completions.create(
model="gpt-4-0613",
messages=messages,
temperature=0.2,
max_tokens=2048,
top_p=0.5,
frequency_penalty=0,
presence_penalty=0,
)

if (
response.choices
Expand All @@ -200,3 +202,61 @@ async def ask_chat_gpt_4_0613_asynchronous(**kwargs) -> str:
return response.choices[0].message.content
else:
return "An error occurred or no content was returned."


# Helper: base64-encode an image file for embedding in an API payload
def encode_image(image_path):
    """Read the file at *image_path* and return its base64 text, UTF-8 decoded."""
    raw_bytes = Path(image_path).read_bytes()
    return base64.b64encode(raw_bytes).decode("utf-8")


# Function to send the image to the vision model
async def ask_gpt_4_vision(image_name, drive_service=None, question="question"):
    """Ask GPT-4 Vision a question about an image in the 'uploads' folder.

    Looks for *image_name* under the local ``uploads/`` directory first; if it
    is not there and *drive_service* is provided, searches the Drive folder
    ``MyDrive/GPT_ALL/uploads`` by file name and downloads it before encoding.

    Args:
        image_name: Bare file name of the image (not a full path).
        drive_service: Optional Google Drive service used as a fallback
            source when the image is not present locally.
        question: Text prompt sent alongside the image. The default keeps the
            original placeholder string for backward compatibility; callers
            should pass the user's real question.

    Returns:
        The parsed JSON response from the OpenAI chat-completions endpoint,
        or a plain error string when the image cannot be located.
    """
    local_image_path = Path("uploads") / image_name
    if local_image_path.is_file():
        base64_image = encode_image(local_image_path)
    else:
        # Guard clauses instead of nested else-branches: bail out early when
        # there is no Drive fallback or the file is not found there either.
        if drive_service is None:
            return "Image not found in local uploads folder."
        files_info = await available_functions["list_files"](drive_service, "MyDrive/GPT_ALL/uploads")
        file_id = next((f['id'] for f in files_info if f['name'] == image_name), None)
        if file_id is None:
            return "Image not found in local uploads folder or Google Drive."
        # Download into uploads/ and encode the fetched copy.
        local_image_path = await available_functions["download_file"](drive_service, file_id, "uploads/")
        base64_image = encode_image(local_image_path)

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {OPENAI_API_KEY}",
    }

    # BUG FIX: the original hard-coded the literal "question" here, so the
    # vision model never received an actual prompt; it is now the `question`
    # parameter (whose default preserves the original behavior).
    payload = {
        "model": "gpt-4-vision-preview",
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": question,
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            # assumes the upload is JPEG-compatible — TODO confirm
                            # (PNG etc. still works with most decoders, but the
                            # declared media type would be wrong)
                            "url": f"data:image/jpeg;base64,{base64_image}",
                        },
                    },
                ],
            }
        ],
        "max_tokens": 600,
    }

    # NOTE(review): requests.post is a blocking call inside a coroutine and
    # stalls the event loop for the duration of the HTTP round-trip; consider
    # an async HTTP client, or run this in a thread executor.
    response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
    return response.json()

0 comments on commit 831c26c

Please sign in to comment.