Commit 6c00eb6

fixed comment to prerelease

vyokky committed Mar 8, 2024
1 parent e47a9f9 commit 6c00eb6
Showing 12 changed files with 292 additions and 138 deletions.
8 changes: 7 additions & 1 deletion learner/xml_loader.py
@@ -58,7 +58,13 @@ def get_microsoft_document_text(self, file: str):
        :param file: The file to get the text for.
        :return: The text for the given file.
        """
-       return UnstructuredXMLLoader(file).load()[0].page_content
+
+       try:
+           doc_text = UnstructuredXMLLoader(file).load()[0].page_content
+       except:
+           doc_text = None
+
+       return doc_text


def construct_document_list(self):
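With this change, get_microsoft_document_text returns None instead of raising when UnstructuredXMLLoader cannot parse a file, so callers need to filter out the failed documents. A minimal sketch of such a caller (the function below is illustrative, not part of the repository):

```python
def collect_document_texts(loader, files):
    """Collect text for each document, skipping files the XML loader failed to parse."""
    texts = []
    for file in files:
        text = loader.get_microsoft_document_text(file)
        if text is None:  # the loader swallowed the parse error and returned None
            continue
        texts.append(text)
    return texts
```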
5 changes: 4 additions & 1 deletion requirements.txt
@@ -1,8 +1,11 @@
art==6.1
colorama==0.4.6
+langchain==0.1.11
+langchain_community==0.0.27
+msal==1.25.0
-openai==1.11.1
+openai==1.13.3
Pillow==10.2.0
pywin32==304
pywinauto==0.6.8
PyYAML==6.0.1
Requests==2.31.0
4 changes: 2 additions & 2 deletions ufo/config/config.yaml.template
@@ -1,10 +1,10 @@
version: 0.1

-API_TYPE: "openai" # The API type, "openai" for the OpenAI API, "aoai" for the AOAI API.
+API_TYPE: "openai" # The API type, "openai" for the OpenAI API, "aoai" for the AOAI API, "azure_ad" for aoai aad.
OPENAI_API_BASE: "YOUR_ENDPOINT" # for the OpenAI API.
OPENAI_API_KEY: "YOUR_API_KEY" # The OpenAI API key
AOAI_DEPLOYMENT: "YOUR_AOAI_DEPLOYMENT" # Your AOAI deployment if apply
-API_VERSION: "2024-02-15-preview" # For GPT4-visual, the value usually be the "2023-12-01-preview"
+API_VERSION: "2024-02-15-preview" # "2024-02-15-preview" by default.
OPENAI_API_MODEL: "gpt-4-vision-preview" # The only OpenAI model by now that accepts visual input
BING_API_KEY: "YOUR_BING_SEARCH_API_KEY" # The Bing search API key

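The template now documents a third API_TYPE value, "azure_ad", alongside "openai" and "aoai". A minimal sketch of how code could branch on that setting after load_config (the dispatch below is an assumption for illustration, not the repository's actual routing):

```python
from ufo.config.config import load_config

configs = load_config()
api_type = configs["API_TYPE"].lower()

if api_type == "openai":
    pass  # call the public OpenAI endpoint using OPENAI_API_KEY and OPENAI_API_MODEL
elif api_type == "aoai":
    pass  # call the Azure OpenAI deployment named by AOAI_DEPLOYMENT
elif api_type == "azure_ad":
    pass  # authenticate through Azure AD and use azure_ad.get_chat_completion
else:
    raise ValueError("Unknown API_TYPE: {t}".format(t=api_type))
```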
14 changes: 13 additions & 1 deletion ufo/llm/azure_ad.py
@@ -4,7 +4,19 @@
from ..config.config import load_config

configs = load_config()
-available_models = Literal[ #only GPT4V could be used
+available_models = Literal[
+    "gpt-35-turbo-20220309",
+    "gpt-35-turbo-16k-20230613",
+    "gpt-35-turbo-20230613",
+    "gpt-35-turbo-1106",
+
+    "gpt-4-20230321",
+    "gpt-4-20230613",
+    "gpt-4-32k-20230321",
+    "gpt-4-32k-20230613",
+    "gpt-4-1106-preview",
+    "gpt-4-0125-preview",
+
    "gpt-4-visual-preview",
]

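available_models is a typing.Literal, so it documents the permitted deployment names and can back a runtime check. A small sketch of such a check, assuming a hypothetical helper (check_model_name is not part of the repository; the Literal below is a trimmed copy of the one in the diff):

```python
from typing import Literal, get_args

# Trimmed copy of the Literal defined in ufo/llm/azure_ad.py.
available_models = Literal[
    "gpt-35-turbo-1106",
    "gpt-4-0125-preview",
    "gpt-4-visual-preview",
]


def check_model_name(model: str) -> str:
    """Fail fast if a configured model name is not a known deployment."""
    if model not in get_args(available_models):
        raise ValueError("Unsupported model: {m}".format(m=model))
    return model
```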
5 changes: 4 additions & 1 deletion ufo/llm/llm_call.py
@@ -6,6 +6,7 @@
from ..config.config import load_config
from ..utils import print_with_color
from .azure_ad import get_chat_completion
+import json


configs = load_config()
@@ -58,12 +59,14 @@ def get_gptv_completion(messages, headers):
temperature = configs["TEMPERATURE"],
top_p = configs["TOP_P"],
)

+response_json = json.loads(response.model_dump_json())

if "error" not in response:
usage = response.usage
prompt_tokens = usage.prompt_tokens
completion_tokens = usage.completion_tokens
-response_json = response


cost = prompt_tokens / 1000 * 0.01 + completion_tokens / 1000 * 0.03

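Dumping the SDK's Pydantic response to JSON and reloading it as a plain dict gives every API type the same dict-shaped response, which is what allows flow.py (below) to drop its azure_ad-specific parsing branch. A standalone sketch of the same pattern, assuming the openai 1.x client (the client call here is a stand-in for the repository's get_chat_completion):

```python
import json

from openai import OpenAI

client = OpenAI()  # stand-in client; the repository builds its own request and headers
response = client.chat.completions.create(
    model="gpt-4-vision-preview",
    messages=[{"role": "user", "content": "hello"}],
)

# The 1.x SDK returns a Pydantic model; serialize it and reload it as a plain dict so
# downstream code can keep using subscript access on "choices", "message", and "content".
response_json = json.loads(response.model_dump_json())
content = response_json["choices"][0]["message"]["content"]
```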
2 changes: 1 addition & 1 deletion ufo/llm/prompt.py
@@ -81,7 +81,7 @@ def action_selection_prompt_construction(prompt_template: str, request_history:



-def retrived_documents_prompt_construction(header: str, separator: str, documents: list):
+def retrived_documents_prompt_helper(header: str, separator: str, documents: list):
"""
Construct the prompt for retrieved documents.
:param header: The header of the prompt.
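Only the name changes here; the helper still turns a header, a separator label, and a list of retrieved snippets into one prompt section. A plausible body, purely as an assumption to show the intended shape (this is not the repository's implementation):

```python
def retrived_documents_prompt_helper(header: str, separator: str, documents: list) -> str:
    """Format retrieved documents into a prompt section (illustrative sketch)."""
    prompt = "<{header}:>\n".format(header=header)
    for i, doc in enumerate(documents):
        prompt += "[{sep} {i}:] {doc}\n".format(sep=separator, i=i, doc=doc)
    return prompt
```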
31 changes: 14 additions & 17 deletions ufo/module/flow.py
@@ -10,7 +10,7 @@
from art import text2art
from pywinauto.uia_defines import NoPatternInterfaceError

-from ..rag import retriever
+from ..rag import retriever_factory
from ..config.config import load_config
from ..llm import llm_call
from ..llm import prompt as prompter
@@ -94,7 +94,7 @@ def process_application_selection(self, headers):

except Exception as e:
log = json.dumps({"step": self.step, "status": str(e), "prompt": app_selection_prompt_message})
print_with_color("Error occurs when calling LLM.", "red")
print_with_color("Error occurs when calling LLM: {e}".format(e=str(e)), "red")
self.request_logger.info(log)
self.status = "ERROR"
return
@@ -103,11 +103,9 @@
self.cost += cost

try:
-aad = configs['API_TYPE'].lower() == 'azure_ad'
-if not aad:
-    response_string = response["choices"][0]["message"]["content"]
-else:
-    response_string = response.choices[0].message.content
+
+response_string = response["choices"][0]["message"]["content"]
+
response_json = json_parser(response_string)

application_label = response_json["ControlLabel"]
@@ -154,10 +152,12 @@

if configs["RAG_OFFLINE_DOCS"]:
print_with_color("Loading offline document indexer for {app}...".format(app=self.application), "magenta")
-self.offline_doc_retriever = retriever.create_offline_doc_retriever(self.application)
+offline_retriever_factory = retriever_factory.OfflineDocRetrieverFactory(self.application)
+self.offline_doc_retriever = offline_retriever_factory.create_offline_doc_retriever()
if configs["RAG_ONLINE_SEARCH"]:
print_with_color("Creating a Bing search indexer...", "magenta")
-self.online_doc_retriever = retriever.create_online_search_retriever(self.request)
+offline_retriever_factory = retriever_factory.OnlineDocRetrieverFactory(self.request)
+self.online_doc_retriever = offline_retriever_factory.create_online_search_retriever()

time.sleep(configs["SLEEP_TIME"])

@@ -228,7 +228,7 @@ def process_action_selection(self, headers):
response, cost = llm_call.get_gptv_completion(action_selection_prompt_message, headers)
except Exception as e:
log = json.dumps({"step": self.step, "status": str(e), "prompt": action_selection_prompt_message})
print_with_color("Error occurs when calling LLM.", "red")
print_with_color("Error occurs when calling LLM: {e}".format(e=str(e)), "red")
self.request_logger.info(log)
self.status = "ERROR"
time.sleep(configs["SLEEP_TIME"])
@@ -237,11 +237,8 @@
self.cost += cost

try:
-aad = configs['API_TYPE'].lower() == 'azure_ad'
-if not aad:
-    response_string = response["choices"][0]["message"]["content"]
-else:
-    response_string = response.choices[0].message.content
+
+response_string = response["choices"][0]["message"]["content"]
response_json = json_parser(response_string)

observation = response_json["Observation"]
@@ -335,12 +332,12 @@ def rag_prompt(self):
retrieved_docs = ""
if self.offline_doc_retriever:
offline_docs = self.offline_doc_retriever.retrieve("How to {query} for {app}".format(query=self.request, app=self.application), configs["RAG_OFFLINE_DOCS_RETRIEVED_TOPK"], filter=None)
-offline_docs_prompt = prompter.retrived_documents_prompt_construction("Help Documents", "Document", [doc.metadata["text"] for doc in offline_docs])
+offline_docs_prompt = prompter.retrived_documents_prompt_helper("Help Documents", "Document", [doc.metadata["text"] for doc in offline_docs])
retrieved_docs += offline_docs_prompt

if self.online_doc_retriever:
online_search_docs = self.online_doc_retriever.retrieve(self.request, configs["RAG_ONLINE_RETRIEVED_TOPK"], filter=None)
-online_docs_prompt = prompter.retrived_documents_prompt_construction("Online Search Results", "Search Result", [doc.page_content for doc in online_search_docs])
+online_docs_prompt = prompter.retrived_documents_prompt_helper("Online Search Results", "Search Result", [doc.page_content for doc in online_search_docs])
retrieved_docs += online_docs_prompt

return retrieved_docs
109 changes: 0 additions & 109 deletions ufo/rag/retriever.py

This file was deleted.
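The module that replaces it, ufo/rag/retriever_factory.py, is not expanded in this view. Judging only from the calls made in flow.py above, its interface would look roughly like the sketch below; the constructor parameters, docstrings, and method bodies are assumptions:

```python
class OfflineDocRetrieverFactory:
    """Builds a retriever over the locally indexed help documents of one application."""

    def __init__(self, app_root_name: str):
        self.app_root_name = app_root_name

    def create_offline_doc_retriever(self):
        # Load (or build) the offline document index for this application and return an
        # object exposing retrieve(query, top_k, filter=None), as rag_prompt() expects.
        raise NotImplementedError


class OnlineDocRetrieverFactory:
    """Builds a retriever backed by Bing search results for the current request."""

    def __init__(self, query: str):
        self.query = query

    def create_online_search_retriever(self):
        # Run the Bing search and wrap the hits in the same retrieve() interface.
        raise NotImplementedError
```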


