Skip to content

Commit ebf2227

Browse files
committed
refactor app, evaluate can now run and evaluate entire app
1 parent 3dde931 commit ebf2227

File tree

17 files changed

+306
-293
lines changed

17 files changed

+306
-293
lines changed

src/api/api/__init__.py

Lines changed: 0 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -1,40 +0,0 @@
1-
from flask import Flask
2-
import logging
3-
import api.get_article as get_article
4-
5-
import os
6-
from opentelemetry import trace
7-
from opentelemetry.sdk.trace import TracerProvider
8-
from opentelemetry.sdk.trace.export import BatchSpanProcessor
9-
from opentelemetry.sdk.trace.sampling import ParentBasedTraceIdRatio
10-
from promptflow.tracing._integrations._openai_injector import inject_openai_api
11-
from azure.monitor.opentelemetry.exporter import AzureMonitorTraceExporter
12-
from promptflow.tracing import start_trace
13-
14-
def create_app():
15-
app = Flask(__name__)
16-
app.register_blueprint(get_article.bp)
17-
init_logging()
18-
return app
19-
20-
21-
def init_logging():
22-
"""Initializes logging."""
23-
24-
# log to app insights if configured
25-
if 'APPLICATIONINSIGHTS_CONNECTION_STRING' in os.environ:
26-
inject_openai_api()
27-
28-
connection_string=os.environ['APPLICATIONINSIGHTS_CONNECTION_STRING']
29-
trace.set_tracer_provider(TracerProvider(sampler=ParentBasedTraceIdRatio(1.0)))
30-
trace.get_tracer_provider().add_span_processor(BatchSpanProcessor(AzureMonitorTraceExporter(connection_string=connection_string)))
31-
32-
if 'PROMPTFLOW_TRACING_SERVER' in os.environ and os.environ['PROMPTFLOW_TRACING_SERVER'] != 'false':
33-
start_trace()
34-
35-
36-
37-
logging.basicConfig(
38-
level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
39-
)
40-
logging.info("Logging initialized.")

src/api/api/agents/designer/designer.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44

55
load_dotenv()
66

7-
def design(context, instructions, feedback):
7+
def design(request, instructions, feedback):
88
# Load prompty with AzureOpenAIModelConfiguration override
99
configuration = AzureOpenAIModelConfiguration(
1010
azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"),
@@ -21,7 +21,7 @@ def design(context, instructions, feedback):
2121
"editor.prompty", model=override_model)
2222

2323
result = prompty_obj(
24-
context=context,
24+
context=request,
2525
instructions=instructions,
2626
feedback=feedback,
2727
)
@@ -30,7 +30,7 @@ def design(context, instructions, feedback):
3030

3131
if __name__ == "__main__":
3232
result = design(
33-
"The context for the designer.",
33+
"The request for the designer.",
3434
"The instructions for the designer.",
3535
"The feedback for the designer.")
3636
print(result)

src/api/api/agents/editor/editor.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
load_dotenv()
1010

1111
def edit(article, feedback):
12-
12+
1313
# Load prompty with AzureOpenAIModelConfiguration override
1414
configuration = AzureOpenAIModelConfiguration(
1515
azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME"),
@@ -24,7 +24,6 @@ def edit(article, feedback):
2424
path_to_prompty = folder + "/editor.prompty"
2525

2626
prompty_obj = Prompty.load(path_to_prompty, model=override_model)
27-
2827
result = prompty_obj(article=article, feedback=feedback,)
2928

3029
return result

src/api/api/agents/orchestrator.py

Lines changed: 81 additions & 71 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,21 @@
11
import json
2-
from promptflow.tracing import trace, start_trace
2+
import logging
3+
from promptflow.tracing import trace
34
from api.agents.researcher import researcher
45
from api.agents.writer import writer
56
from api.agents.editor import editor
67
from api.agents.designer import designer
78
from api.agents.product import product
9+
from api.logging import log_output
10+
from api.evaluate.evaluators import evaluate_article_in_background
11+
812
from dotenv import load_dotenv
913
load_dotenv()
1014

1115
@trace
12-
def get_research(context, instructions, feedback):
13-
16+
def get_research(request, instructions, feedback):
1417
research_result = researcher.research(
15-
context=context,
18+
request=request,
1619
instructions=instructions,
1720
feedback=feedback
1821
)
@@ -23,10 +26,9 @@ def get_research(context, instructions, feedback):
2326

2427

2528
@trace
26-
def get_writer(context, feedback, instructions, research=[], products=[]):
27-
29+
def get_writer(request, feedback, instructions, research=[], products=[]):
2830
writer_reponse = writer.write(
29-
context=context, feedback=feedback, instructions=instructions, research=research, products=products
31+
request=request, feedback=feedback, instructions=instructions, research=research, products=products
3032
)
3133
print(json.dumps(writer_reponse, indent=2))
3234
return writer_reponse
@@ -41,12 +43,13 @@ def get_editor(article, feedback):
4143

4244

4345
@trace
44-
def get_designer(context, instructions, feedback):
45-
designer_task = designer.design(context, instructions, feedback)
46+
def get_designer(request, instructions, feedback):
47+
designer_task = designer.design(request, instructions, feedback)
4648
print(json.dumps(designer_task, indent=2))
4749
return designer_task
4850

4951

52+
# TODO: delete, I don't think this is used...
5053
@trace
5154
def regenerate_process(editor_response, context, instructions, product_documenation):
5255
# Get feedback for research from writer
@@ -72,71 +75,78 @@ def regenerate_process(editor_response, context, instructions, product_documenat
7275
)
7376
return editor_response
7477

75-
7678
@trace
77-
def get_article(context, instructions):
78-
# This code is duplicated in the API response to yield stepped results. TODO: fix this so it's not duplicated later
79-
80-
feedback = ""
81-
print("Getting article for context: ", context)
82-
83-
# researcher task look up the info
84-
print("Getting researcher task output...")
85-
research_result = get_research(context, instructions, feedback)
86-
product_documenation = product.get_products(context)
87-
88-
# then send it to the writer, the writer writes the article
89-
print("Getting writer task output...")
90-
writer_reponse = get_writer(
91-
context, feedback, instructions, research=research_result, products=product_documenation
92-
)
79+
def write_article(request, instructions, evaluate=False):
80+
log_output("Article generation started for request: %s, instructions: %s", request, instructions)
81+
82+
feedback = "No Feedback"
83+
84+
# Researcher task look up the info
85+
yield ("message", "Starting research agent task...")
86+
log_output("Getting researcher task output...")
87+
research_result = get_research(request, instructions, feedback)
88+
yield ("researcher", research_result)
89+
90+
# Retrieve product information relevant to the user's query
91+
log_output("Product information...")
92+
product_documenation = product.get_products(request)
93+
yield ("products", product_documenation)
94+
95+
# Then send it to the writer, the writer writes the article
96+
yield ("message", "Starting writer agent task...")
97+
log_output("Getting writer task output...")
98+
writer_response = get_writer(request, feedback, instructions, research=research_result, products=product_documenation)
99+
yield ("writer", writer_response)
100+
101+
# Then send it to the editor, to decide if it's good or not
102+
yield ("message", "Starting editor agent task...")
103+
log_output("Getting editor task output...")
104+
editor_response = get_editor(writer_response["article"], writer_response["feedback"])
105+
log_output("Editor response: %s", editor_response)
106+
107+
yield ("editor", editor_response)
108+
retry_count = 0
109+
while(str(editor_response["decision"]).lower().startswith("accept")):
110+
yield ("message", f"Sending editor feedback ({retry_count + 1})...")
111+
log_output("Regeneration attempt %d based on editor feedback", retry_count + 1)
112+
113+
# Regenerate with feedback loop
114+
researchFeedback = editor_response.get("researchFeedback", "No Feedback")
115+
editorFeedback = editor_response.get("editorFeedback", "No Feedback")
116+
117+
research_result = get_research(request, instructions, researchFeedback)
118+
yield ("researcher", research_result)
119+
120+
writer_response = get_writer(request, editorFeedback, instructions, research=research_result, products=product_documenation)
121+
yield ("writer", writer_response)
122+
123+
editor_response = get_editor(writer_response["article"], writer_response["feedback"])
124+
yield ("editor", editor_response)
125+
126+
retry_count += 1
127+
if retry_count >= 2:
128+
break
129+
130+
log_output("Editor accepted article after %d iterations", retry_count)
131+
yield ("message", "Editor accepted article")
132+
133+
if evaluate:
134+
evaluate_article_in_background(
135+
request=request,
136+
instructions=instructions,
137+
research=research_result,
138+
products=product_documenation,
139+
article=writer_response
140+
)
141+
142+
# Log final editor response
143+
log_output("Final editor response: %s", json.dumps(editor_response, indent=2))
93144

94-
# then send it to the editor, to decide if it's good or not
95-
print("Getting editor task output...")
96-
editor_response = get_editor(
97-
writer_reponse["article"], writer_reponse["feedback"]
98-
)
99-
print(editor_response)
100-
101-
# retry until decision is accept or until 2x tries
102-
if editor_response["decision"] == "reject":
103-
print("Editor rejected writer, sending back to writer (1)...")
104-
# retry research, writer, and editor with feedback from writer and editor
105-
editor_response = regenerate_process(editor_response, context, instructions, product_documenation)
106-
107-
if editor_response["decision"] == "reject":
108-
print("Editor rejected writer again, sending back to writer (2)...")
109-
# retry research, writer, and editor with feedback from writer and editor
110-
editor_response = regenerate_process(editor_response, context, product_documenation)
111-
112-
print("Editor accepted writer and research, sending to designer...")
113-
# SETH TODO: send to designer
114-
# designer_task = designer.design(context, instructions, feedback)
115-
# create result object with editor response and writer response
116-
result = {"editor_response": editor_response, "writer_response": writer_reponse}
117-
print(json.dumps(result, indent=2))
118-
return result
119-
120145
if __name__ == "__main__":
121-
import os
122-
from opentelemetry import trace
123-
from opentelemetry.sdk.trace import TracerProvider
124-
from opentelemetry.sdk.trace.export import BatchSpanProcessor
125-
from azure.monitor.opentelemetry.exporter import AzureMonitorTraceExporter
126-
from opentelemetry.sdk.trace.sampling import ParentBasedTraceIdRatio
127-
from promptflow.tracing._integrations._openai_injector import inject_openai_api
128-
129-
# log to app insights if configured
130-
if 'APPLICATIONINSIGHTS_CONNECTION_STRING' in os.environ:
131-
inject_openai_api()
132-
133-
connection_string=os.environ['APPLICATIONINSIGHTS_CONNECTION_STRING']
134-
trace.set_tracer_provider(TracerProvider(sampler=ParentBasedTraceIdRatio(1.0)))
135-
trace.get_tracer_provider().add_span_processor(BatchSpanProcessor(AzureMonitorTraceExporter(connection_string=connection_string)))
136-
137-
if 'PROMPTFLOW_TRACING_SERVER' in os.environ and os.environ['PROMPTFLOW_TRACING_SERVER'] != 'false':
138-
start_trace()
146+
from api.logging import init_logging
139147

148+
init_logging()
140149
context = "Can you find the latest camping trends and what folks are doing in the winter?"
141150
instructions = "Can you find the relevant information needed and good places to visit"
142-
get_article(context, instructions)
151+
for result in write_article(context, instructions, evaluate=True):
152+
print(*result)

src/api/api/agents/product/ai_search.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111

1212

1313
def retrieve_documentation(
14-
question: str,
14+
request: str,
1515
index_name: str,
1616
embedding: List[float],
1717
) -> str:
@@ -27,7 +27,7 @@ def retrieve_documentation(
2727
)
2828

2929
results = search_client.search(
30-
search_text=question,
30+
search_text=request,
3131
vector_queries=[vector_query],
3232
query_type=QueryType.SEMANTIC,
3333
semantic_configuration_name="default",

src/api/api/agents/product/product.py

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import os
22
import json
3-
from typing import Dict, List
3+
from typing import Dict
44
from openai import AzureOpenAI
55

66
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
@@ -13,10 +13,10 @@
1313
load_dotenv()
1414

1515
@trace
16-
def get_context(question, embedding):
17-
return retrieve_documentation(question=question, index_name="contoso-products", embedding=embedding)
16+
def get_context(request, embedding):
17+
return retrieve_documentation(request=request, index_name="contoso-products", embedding=embedding)
1818

19-
def get_embedding(question: str):
19+
def get_embedding(request: str):
2020
token_provider = get_bearer_token_provider(
2121
DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
2222
)
@@ -28,18 +28,15 @@ def get_embedding(question: str):
2828
)
2929

3030
return client.embeddings.create(
31-
input=question,
31+
input=request,
3232
model="text-embedding-ada-002"
3333
).data[0].embedding
3434

35-
36-
def get_products(context: str) -> Dict[str, any]:
37-
embedding = get_embedding(context)
38-
products = get_context(context, embedding)
39-
print(products)
35+
def get_products(request: str) -> Dict[str, any]:
36+
embedding = get_embedding(request)
37+
products = get_context(request, embedding)
4038
return products
4139

42-
4340
if __name__ == "__main__":
4441
context = "what kind of jackets do you have?"
4542
answer = get_products(context)

src/api/api/agents/researcher/researcher.prompty

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ Your queries should be descriptive and match the context and feedback provided.
4444
Use this context to formulate your queries, market, and the tools you will use to describe
4545
your research:
4646

47-
{{context}}
47+
{{request}}
4848

4949
# Feedback
5050
Use this feedback to help you refine your queries and responses - if there is any feedback:

src/api/api/agents/researcher/researcher.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ def find_news(query, market="en-US"):
7070
return articles
7171

7272
@trace
73-
def execute(context: str, instructions: str, feedback: str = ""):
73+
def execute(request: str, instructions: str, feedback: str = ""):
7474
"""Assign a research task to a researcher"""
7575
functions = {
7676
"find_information": find_information,
@@ -80,7 +80,7 @@ def execute(context: str, instructions: str, feedback: str = ""):
8080
fns = prompty.execute(
8181
"researcher.prompty",
8282
inputs={
83-
"context": context,
83+
"request": request,
8484
"instructions": instructions,
8585
"feedback": feedback,
8686
},
@@ -129,8 +129,8 @@ def process(research):
129129
}
130130

131131

132-
def research(context, instructions, feedback: str = ""):
133-
r = execute(context=context, instructions=instructions, feedback=feedback)
132+
def research(request, instructions, feedback: str = ""):
133+
r = execute(request=request, instructions=instructions, feedback=feedback)
134134
p = process(r)
135135
return p
136136

0 commit comments

Comments
 (0)