diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ff3243764..dd09e9627 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -5,15 +5,12 @@ default_stages: [commit]
 default_install_hook_types: [pre-commit, commit-msg]
 
 repos:
-  # - repo: https://github.com/astral-sh/ruff-pre-commit
-  #   # Ruff version.
-  #   rev: v0.1.11
-  #   hooks:
-  #     # Run the linter.
-  #     - id: ruff
-  #       args: [--fix]
-  #     # Run the formatter.
-  #     - id: ruff-format
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.1.11
+    hooks:
+      - id: ruff
+        args: [--fix]
+      - id: ruff-format
 
   - repo: https://github.com/timothycrosley/isort
     rev: 5.12.0
@@ -50,14 +47,15 @@ repos:
       args:
         - "--autofix"
        - "--indent=2"
-  # - repo: local
-  #   hooks:
-  #     - id: validate-commit-msg
-  #       name: Commit Message is Valid
-  #       language: pygrep
-  #       entry: ^(break|build|ci|docs|feat|fix|perf|refactor|style|test|ops|hotfix|release|maint|init|enh|revert)\([\w,\.,\-,\(,\),\/]+\)(!?)(:)\s{1}([\w,\W,:]+)
-  #       stages: [commit-msg]
-  #       args: [--negate]
+
+  - repo: local
+    hooks:
+      - id: validate-commit-msg
+        name: Commit Message is Valid
+        language: pygrep
+        entry: ^(break|build|ci|docs|feat|fix|perf|refactor|style|test|ops|hotfix|release|maint|init|enh|revert)\([\w,\.,\-,\(,\),\/]+\)(!?)(:)\s{1}([\w,\W,:]+)
+        stages: [commit-msg]
+        args: [--negate]
 
   - repo: https://github.com/pre-commit/mirrors-prettier
     rev: v3.0.3
diff --git a/dsp/modules/cache_utils.py b/dsp/modules/cache_utils.py
index e6c09fe71..a7e2bde2f 100644
--- a/dsp/modules/cache_utils.py
+++ b/dsp/modules/cache_utils.py
@@ -6,7 +6,7 @@
 
 from dsp.utils import dotdict
 
-cache_turn_on = True
+cache_turn_on = os.environ.get('DSP_CACHEBOOL', 'True').lower() != 'false'
 
 
 def noop_decorator(arg=None, *noop_args, **noop_kwargs):
diff --git a/dsp/modules/gpt3.py b/dsp/modules/gpt3.py
index cd9359bc1..02d3929b9 100644
--- a/dsp/modules/gpt3.py
+++ b/dsp/modules/gpt3.py
@@ -62,12 +62,15 @@ def __init__(
         api_provider: Literal["openai"] = "openai",
         api_base: Optional[str] = None,
         model_type: Literal["chat", "text"] = None,
+        system_prompt: Optional[str] = None,
         **kwargs,
     ):
         super().__init__(model)
         self.provider = "openai"
         openai.api_type = api_provider
 
+        self.system_prompt = system_prompt
+
         assert (
             api_provider != "azure"
         ), "Azure functionality with base OpenAI has been deprecated, please use dspy.AzureOpenAI instead."
@@ -118,6 +121,9 @@ def basic_request(self, prompt: str, **kwargs):
         kwargs = {**self.kwargs, **kwargs}
         if self.model_type == "chat":
             # caching mechanism requires hashable kwargs
+            messages = [{"role": "user", "content": prompt}]
+            if self.system_prompt:
+                messages.insert(0, {"role": "system", "content": self.system_prompt})
-            kwargs["messages"] = [{"role": "user", "content": prompt}]
+            kwargs["messages"] = messages
             kwargs = {"stringify_request": json.dumps(kwargs)}
             response = chat_request(**kwargs)
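Reviewer note: a minimal usage sketch for the two behavior changes above (the `DSP_CACHEBOOL` flag in cache_utils.py and the new `system_prompt` argument in gpt3.py). The model name and prompt text are illustrative, not part of this diff.

```python
import os

# cache_turn_on is evaluated once at import time, so the environment
# variable must be set before dspy (and dsp.modules) is imported.
os.environ["DSP_CACHEBOOL"] = "false"  # any casing of "false" disables caching

import dspy

# system_prompt is prepended as a {"role": "system"} message on every
# chat-mode request made by this LM.
lm = dspy.OpenAI(model="gpt-3.5-turbo", system_prompt="You are a concise assistant.")
dspy.settings.configure(lm=lm)
```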
- max_tokens = kwargs.get("max_tokens", dsp.settings.lm.kwargs["max_tokens"]) + max_tokens = (kwargs.get("max_tokens") or + kwargs.get("max_output_tokens") or + dsp.settings.lm.kwargs.get("max_tokens") or + dsp.settings.lm.kwargs.get('max_output_tokens')) + + + if max_tokens is None: + raise ValueError("Required 'max_tokens' or 'max_output_tokens' not specified in settings.") max_tokens = min(max(75, max_tokens // 2), max_tokens) + keys = list(kwargs.keys()) + list(dsp.settings.lm.kwargs.keys()) + max_tokens_key = "max_tokens" if "max_tokens" in keys else "max_output_tokens" new_kwargs = { **kwargs, - "max_tokens": max_tokens, + max_tokens_key: max_tokens, "n": 1, "temperature": 0.0, } diff --git a/dsp/utils/settings.py b/dsp/utils/settings.py index 1de0ada7e..0d0846800 100644 --- a/dsp/utils/settings.py +++ b/dsp/utils/settings.py @@ -36,7 +36,7 @@ def __new__(cls): force_reuse_cached_compilation=False, compiling=False, skip_logprobs=False, - trace=None, + trace=[], release=0, log_openai_usage=False, bypass_assert=False, diff --git a/dspy/experimental/synthesizer/synthesizer.py b/dspy/experimental/synthesizer/synthesizer.py index 0672a2808..bd70e6d7a 100644 --- a/dspy/experimental/synthesizer/synthesizer.py +++ b/dspy/experimental/synthesizer/synthesizer.py @@ -202,7 +202,9 @@ def generate( } if self.config.num_example_for_optim: - kwargs["ground_source"] = random.sample(ground_source, self.config.num_example_for_optim) + if not isinstance(ground_source, list): + raise ValueError("Ground source must be a list of examples when `num_example_for_optim` is provided.") + kwargs["ground_source"] = random.sample(ground_source, k=self.config.num_example_for_optim) with dspy.context(lm=self.input_lm): inputs = self.input_predictor(**kwargs) diff --git a/dspy/retrieve/pgvector_rm.py b/dspy/retrieve/pgvector_rm.py index cf1773f17..d12eb6b05 100644 --- a/dspy/retrieve/pgvector_rm.py +++ b/dspy/retrieve/pgvector_rm.py @@ -1,6 +1,5 @@ -from typing import List, Optional - -import openai +import warnings +from typing import Callable, Optional import dspy @@ -12,6 +11,11 @@ raise ImportError( "The 'pgvector' extra is required to use PgVectorRM. Install it with `pip install dspy-ai[pgvector]`", ) +try: + import openai +except ImportError: + warnings.warn("`openai` is not installed. Install it with `pip install openai` to use OpenAI embedding models.", + category=ImportWarning) class PgVectorRM(dspy.Retrieve): @@ -26,7 +30,8 @@ class PgVectorRM(dspy.Retrieve): Args: db_url (str): A PostgreSQL database URL in psycopg2's DSN format pg_table_name (Optional[str]): name of the table containing passages - openai_client (openai.OpenAI): OpenAI client to use for computing query embeddings + openai_client (openai.OpenAI): OpenAI client to use for computing query embeddings. Either openai_client or embedding_func must be provided. + embedding_func (Callable): A function to use for computing query embeddings. Either openai_client or embedding_func must be provided. k (Optional[int]): Default number of top passages to retrieve. Defaults to 20 embedding_field (str = "embedding"): Field containing passage embeddings. Defaults to "embedding" fields (List[str] = ['text']): Fields to retrieve from the table. 
Defaults to "text" @@ -41,10 +46,10 @@ class PgVectorRM(dspy.Retrieve): openai.api_key = os.environ.get("OPENAI_API_KEY", None) openai_client = openai.OpenAI() - + llm = dspy.OpenAI(model="gpt-3.5-turbo") - - DATABASE_URL should be in the format postgresql://user:password@host/database + + DATABASE_URL should be in the format postgresql://user:password@host/database db_url=os.getenv("DATABASE_URL") retriever_model = PgVectorRM(conn, openai_client=openai_client, "paragraphs", fields=["text", "document_id"], k=20) @@ -60,16 +65,19 @@ def __init__( self, db_url: str, pg_table_name: str, - openai_client: openai.OpenAI, - k: Optional[int]=20, + openai_client: Optional[openai.OpenAI] = None, + embedding_func: Optional[Callable] = None, + k: Optional[int] = 20, embedding_field: str = "embedding", - fields: List[str] = ['text'], + fields: list[str] = ['text'], ): """ k = 20 is the number of paragraphs to retrieve """ + assert openai_client or embedding_func, "Either openai_client or embedding_func must be provided." self.openai_client = openai_client - + self.embedding_func = embedding_func + self.conn = psycopg2.connect(db_url) register_vector(self.conn) self.pg_table_name = pg_table_name @@ -80,19 +88,15 @@ def __init__( def forward(self, query: str, k: Optional[int]=20): """Search with PgVector for self.k top passages for query - + Args: query (str): The query to search for k (Optional[int]): The number of top passages to retrieve. Defaults to self.k - Returns: + Returns: dspy.Prediction: an object containing the retrieved passages. """ # Embed query - query_embedding = self.openai_client.embeddings.create( - model="text-embedding-ada-002", - input=query, - encoding_format="float", - ).data[0].embedding + query_embedding = self._get_embeddings(query) related_paragraphs = [] @@ -115,4 +119,14 @@ def forward(self, query: str, k: Optional[int]=20): for row in rows: related_paragraphs.append(dspy.Example(long_text=row[0], document_id=row[1])) # Return Prediction - return related_paragraphs \ No newline at end of file + return related_paragraphs + + def _get_embeddings(self, query: str) -> list[float]: + if self.openai_client is not None: + return self.openai_client.embeddings.create( + model="text-embedding-ada-002", + input=query, + encoding_format="float", + ).data[0].embedding + else: + return self.embedding_func(query) diff --git a/intro.ipynb b/intro.ipynb index c6db5871a..1dc2cf6d9 100644 --- a/intro.ipynb +++ b/intro.ipynb @@ -35,20 +35,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/var/folders/qz/yy2p38hj2m9c7bfp30yq99340000gn/T/ipykernel_40349/1846046422.py:20: DeprecationWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html\n", - " import pkg_resources # Install the package if it's not installed\n", - "/opt/homebrew/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - } - ], + "outputs": [], "source": [ "%load_ext autoreload\n", "%autoreload 2\n", @@ -94,7 +83,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -148,7 +137,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -157,7 +146,7 @@ "(20, 50)" ] }, - "execution_count": 4, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -188,7 +177,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -215,7 +204,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -244,7 +233,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -306,7 +295,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -330,39 +319,15 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 8, "metadata": {}, "outputs": [ { - "ename": "OpenAIError", - "evalue": "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mOpenAIError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[9], line 5\u001b[0m\n\u001b[1;32m 2\u001b[0m generate_answer \u001b[38;5;241m=\u001b[39m dspy\u001b[38;5;241m.\u001b[39mPredict(BasicQA)\n\u001b[1;32m 4\u001b[0m \u001b[38;5;66;03m# Call the predictor on a particular input.\u001b[39;00m\n\u001b[0;32m----> 5\u001b[0m pred \u001b[38;5;241m=\u001b[39m \u001b[43mgenerate_answer\u001b[49m\u001b[43m(\u001b[49m\u001b[43mquestion\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdev_example\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mquestion\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;66;03m# Print the input and the prediction.\u001b[39;00m\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mQuestion: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mdev_example\u001b[38;5;241m.\u001b[39mquestion\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n", - "File \u001b[0;32m~/repos/dspy/dspy/predict/predict.py:49\u001b[0m, in \u001b[0;36mPredict.__call__\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__call__\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m---> 49\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mforward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/repos/dspy/dspy/predict/predict.py:91\u001b[0m, in \u001b[0;36mPredict.forward\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m 88\u001b[0m template \u001b[38;5;241m=\u001b[39m signature_to_template(signature)\n\u001b[1;32m 90\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m 
\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlm \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m---> 91\u001b[0m x, C \u001b[38;5;241m=\u001b[39m \u001b[43mdsp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtemplate\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[43m)\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstage\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstage\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 92\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 93\u001b[0m \u001b[38;5;66;03m# Note: query_only=True means the instructions and examples are not included.\u001b[39;00m\n\u001b[1;32m 94\u001b[0m \u001b[38;5;66;03m# I'm not really sure why we'd want to do that, but it's there.\u001b[39;00m\n\u001b[1;32m 95\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m dsp\u001b[38;5;241m.\u001b[39msettings\u001b[38;5;241m.\u001b[39mcontext(lm\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlm, query_only\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m):\n", - "File \u001b[0;32m~/repos/dspy/dsp/primitives/predict.py:77\u001b[0m, in \u001b[0;36m_generate..do_generate\u001b[0;34m(example, stage, max_depth, original_example)\u001b[0m\n\u001b[1;32m 75\u001b[0m \u001b[38;5;66;03m# Generate and extract the fields.\u001b[39;00m\n\u001b[1;32m 76\u001b[0m prompt \u001b[38;5;241m=\u001b[39m template(example)\n\u001b[0;32m---> 77\u001b[0m completions: \u001b[38;5;28mlist\u001b[39m[\u001b[38;5;28mdict\u001b[39m[\u001b[38;5;28mstr\u001b[39m, Any]] \u001b[38;5;241m=\u001b[39m \u001b[43mgenerator\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 78\u001b[0m completions: \u001b[38;5;28mlist\u001b[39m[Example] \u001b[38;5;241m=\u001b[39m [template\u001b[38;5;241m.\u001b[39mextract(example, p) \u001b[38;5;28;01mfor\u001b[39;00m p \u001b[38;5;129;01min\u001b[39;00m completions]\n\u001b[1;32m 80\u001b[0m \u001b[38;5;66;03m# Find the completions that are most complete.\u001b[39;00m\n", - "File \u001b[0;32m~/repos/dspy/dsp/modules/gpt3.py:186\u001b[0m, in \u001b[0;36mGPT3.__call__\u001b[0;34m(self, prompt, only_completed, return_sorted, **kwargs)\u001b[0m\n\u001b[1;32m 178\u001b[0m \u001b[38;5;28;01massert\u001b[39;00m return_sorted \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mFalse\u001b[39;00m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfor now\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 180\u001b[0m \u001b[38;5;66;03m# if kwargs.get(\"n\", 1) > 1:\u001b[39;00m\n\u001b[1;32m 181\u001b[0m \u001b[38;5;66;03m# if self.model_type == \"chat\":\u001b[39;00m\n\u001b[1;32m 182\u001b[0m \u001b[38;5;66;03m# kwargs = {**kwargs}\u001b[39;00m\n\u001b[1;32m 183\u001b[0m \u001b[38;5;66;03m# else:\u001b[39;00m\n\u001b[1;32m 184\u001b[0m \u001b[38;5;66;03m# kwargs = {**kwargs, \"logprobs\": 5}\u001b[39;00m\n\u001b[0;32m--> 186\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrequest\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 188\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m dsp\u001b[38;5;241m.\u001b[39msettings\u001b[38;5;241m.\u001b[39mlog_openai_usage:\n\u001b[1;32m 189\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlog_usage(response)\n", - "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/backoff/_sync.py:105\u001b[0m, in \u001b[0;36mretry_exception..retry\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 96\u001b[0m details \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 97\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtarget\u001b[39m\u001b[38;5;124m\"\u001b[39m: target,\n\u001b[1;32m 98\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124margs\u001b[39m\u001b[38;5;124m\"\u001b[39m: args,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 101\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124melapsed\u001b[39m\u001b[38;5;124m\"\u001b[39m: elapsed,\n\u001b[1;32m 102\u001b[0m }\n\u001b[1;32m 104\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 105\u001b[0m ret \u001b[38;5;241m=\u001b[39m \u001b[43mtarget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 106\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m exception \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 107\u001b[0m max_tries_exceeded \u001b[38;5;241m=\u001b[39m (tries \u001b[38;5;241m==\u001b[39m max_tries_value)\n", - "File \u001b[0;32m~/repos/dspy/dsp/modules/gpt3.py:152\u001b[0m, in \u001b[0;36mGPT3.request\u001b[0;34m(self, prompt, **kwargs)\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel_type\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m kwargs:\n\u001b[1;32m 150\u001b[0m \u001b[38;5;28;01mdel\u001b[39;00m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel_type\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n\u001b[0;32m--> 152\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbasic_request\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprompt\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/repos/dspy/dsp/modules/gpt3.py:125\u001b[0m, in \u001b[0;36mGPT3.basic_request\u001b[0;34m(self, prompt, **kwargs)\u001b[0m\n\u001b[1;32m 123\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmessages\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m [{\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrole\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124muser\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcontent\u001b[39m\u001b[38;5;124m\"\u001b[39m: prompt}]\n\u001b[1;32m 124\u001b[0m kwargs \u001b[38;5;241m=\u001b[39m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstringify_request\u001b[39m\u001b[38;5;124m\"\u001b[39m: json\u001b[38;5;241m.\u001b[39mdumps(kwargs)}\n\u001b[0;32m--> 125\u001b[0m response \u001b[38;5;241m=\u001b[39m 
\u001b[43mchat_request\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 127\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 128\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mprompt\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m prompt\n", - "File \u001b[0;32m~/repos/dspy/dsp/modules/gpt3.py:273\u001b[0m, in \u001b[0;36mchat_request\u001b[0;34m(**kwargs)\u001b[0m\n\u001b[1;32m 270\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m OPENAI_LEGACY:\n\u001b[1;32m 271\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m _cached_gpt3_turbo_request_v2_wrapped(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m--> 273\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mv1_cached_gpt3_turbo_request_v2_wrapped\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mmodel_dump()\n", - "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/joblib/memory.py:655\u001b[0m, in \u001b[0;36mMemorizedFunc.__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 654\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__call__\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m--> 655\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_cached_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;241m0\u001b[39m]\n", - "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/joblib/memory.py:598\u001b[0m, in \u001b[0;36mMemorizedFunc._cached_call\u001b[0;34m(self, args, kwargs, shelving)\u001b[0m\n\u001b[1;32m 595\u001b[0m must_call \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 597\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m must_call:\n\u001b[0;32m--> 598\u001b[0m out, metadata \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcall\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 599\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmmap_mode \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 600\u001b[0m \u001b[38;5;66;03m# Memmap the output at the first call to be consistent with\u001b[39;00m\n\u001b[1;32m 601\u001b[0m \u001b[38;5;66;03m# later calls\u001b[39;00m\n\u001b[1;32m 602\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_verbose:\n", - "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/joblib/memory.py:856\u001b[0m, in \u001b[0;36mMemorizedFunc.call\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 854\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_verbose \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m 855\u001b[0m 
\u001b[38;5;28mprint\u001b[39m(format_call(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfunc, args, kwargs))\n\u001b[0;32m--> 856\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 857\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstore_backend\u001b[38;5;241m.\u001b[39mdump_item(\n\u001b[1;32m 858\u001b[0m [func_id, args_id], output, verbose\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_verbose)\n\u001b[1;32m 860\u001b[0m duration \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime() \u001b[38;5;241m-\u001b[39m start_time\n", - "File \u001b[0;32m~/repos/dspy/dsp/modules/gpt3.py:266\u001b[0m, in \u001b[0;36mv1_cached_gpt3_turbo_request_v2_wrapped\u001b[0;34m(**kwargs)\u001b[0m\n\u001b[1;32m 263\u001b[0m \u001b[38;5;129m@functools\u001b[39m\u001b[38;5;241m.\u001b[39mlru_cache(maxsize\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01mif\u001b[39;00m cache_turn_on \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;241m0\u001b[39m)\n\u001b[1;32m 264\u001b[0m \u001b[38;5;129m@NotebookCacheMemory\u001b[39m\u001b[38;5;241m.\u001b[39mcache\n\u001b[1;32m 265\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mv1_cached_gpt3_turbo_request_v2_wrapped\u001b[39m(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m--> 266\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mv1_cached_gpt3_turbo_request_v2\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/joblib/memory.py:655\u001b[0m, in \u001b[0;36mMemorizedFunc.__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 654\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__call__\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m--> 655\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_cached_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m[\u001b[38;5;241m0\u001b[39m]\n", - "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/joblib/memory.py:598\u001b[0m, in \u001b[0;36mMemorizedFunc._cached_call\u001b[0;34m(self, args, kwargs, shelving)\u001b[0m\n\u001b[1;32m 595\u001b[0m must_call \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 597\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m must_call:\n\u001b[0;32m--> 598\u001b[0m out, metadata \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcall\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 599\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmmap_mode 
\u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 600\u001b[0m \u001b[38;5;66;03m# Memmap the output at the first call to be consistent with\u001b[39;00m\n\u001b[1;32m 601\u001b[0m \u001b[38;5;66;03m# later calls\u001b[39;00m\n\u001b[1;32m 602\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_verbose:\n", - "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/joblib/memory.py:856\u001b[0m, in \u001b[0;36mMemorizedFunc.call\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 854\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_verbose \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m 855\u001b[0m \u001b[38;5;28mprint\u001b[39m(format_call(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfunc, args, kwargs))\n\u001b[0;32m--> 856\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 857\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstore_backend\u001b[38;5;241m.\u001b[39mdump_item(\n\u001b[1;32m 858\u001b[0m [func_id, args_id], output, verbose\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_verbose)\n\u001b[1;32m 860\u001b[0m duration \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime() \u001b[38;5;241m-\u001b[39m start_time\n", - "File \u001b[0;32m~/repos/dspy/dsp/modules/gpt3.py:260\u001b[0m, in \u001b[0;36mv1_cached_gpt3_turbo_request_v2\u001b[0;34m(**kwargs)\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstringify_request\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m kwargs:\n\u001b[1;32m 259\u001b[0m kwargs \u001b[38;5;241m=\u001b[39m json\u001b[38;5;241m.\u001b[39mloads(kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstringify_request\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n\u001b[0;32m--> 260\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mopenai\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mchat\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcompletions\u001b[49m\u001b[38;5;241m.\u001b[39mcreate(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n", - "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/openai/_utils/_proxy.py:20\u001b[0m, in \u001b[0;36mLazyProxy.__getattr__\u001b[0;34m(self, attr)\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__getattr__\u001b[39m(\u001b[38;5;28mself\u001b[39m, attr: \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28mobject\u001b[39m:\n\u001b[0;32m---> 20\u001b[0m proxied \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m__get_proxied__\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 21\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(proxied, LazyProxy):\n\u001b[1;32m 22\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m proxied \u001b[38;5;66;03m# pyright: ignore\u001b[39;00m\n", - "File 
\u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/openai/_utils/_proxy.py:55\u001b[0m, in \u001b[0;36mLazyProxy.__get_proxied__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 54\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__get_proxied__\u001b[39m(\u001b[38;5;28mself\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m T:\n\u001b[0;32m---> 55\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m__load__\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/openai/_module_client.py:12\u001b[0m, in \u001b[0;36mChatProxy.__load__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[38;5;129m@override\u001b[39m\n\u001b[1;32m 11\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__load__\u001b[39m(\u001b[38;5;28mself\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m resources\u001b[38;5;241m.\u001b[39mChat:\n\u001b[0;32m---> 12\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_load_client\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mchat\n", - "File \u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/openai/__init__.py:297\u001b[0m, in \u001b[0;36m_load_client\u001b[0;34m()\u001b[0m\n\u001b[1;32m 281\u001b[0m _client \u001b[38;5;241m=\u001b[39m _AzureModuleClient( \u001b[38;5;66;03m# type: ignore\u001b[39;00m\n\u001b[1;32m 282\u001b[0m api_version\u001b[38;5;241m=\u001b[39mapi_version,\n\u001b[1;32m 283\u001b[0m azure_endpoint\u001b[38;5;241m=\u001b[39mazure_endpoint,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 293\u001b[0m http_client\u001b[38;5;241m=\u001b[39mhttp_client,\n\u001b[1;32m 294\u001b[0m )\n\u001b[1;32m 295\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m _client\n\u001b[0;32m--> 297\u001b[0m _client \u001b[38;5;241m=\u001b[39m \u001b[43m_ModuleClient\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 298\u001b[0m \u001b[43m \u001b[49m\u001b[43mapi_key\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mapi_key\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 299\u001b[0m \u001b[43m \u001b[49m\u001b[43morganization\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43morganization\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 300\u001b[0m \u001b[43m \u001b[49m\u001b[43mbase_url\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mbase_url\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 301\u001b[0m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 302\u001b[0m \u001b[43m \u001b[49m\u001b[43mmax_retries\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmax_retries\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 303\u001b[0m \u001b[43m \u001b[49m\u001b[43mdefault_headers\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdefault_headers\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 304\u001b[0m \u001b[43m \u001b[49m\u001b[43mdefault_query\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdefault_query\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 305\u001b[0m \u001b[43m \u001b[49m\u001b[43mhttp_client\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mhttp_client\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 306\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 307\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m _client\n\u001b[1;32m 309\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m _client\n", - "File 
\u001b[0;32m/opt/homebrew/lib/python3.11/site-packages/openai/_client.py:98\u001b[0m, in \u001b[0;36mOpenAI.__init__\u001b[0;34m(self, api_key, organization, base_url, timeout, max_retries, default_headers, default_query, http_client, _strict_response_validation)\u001b[0m\n\u001b[1;32m 96\u001b[0m api_key \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39menviron\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mOPENAI_API_KEY\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 97\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m api_key \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m---> 98\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m OpenAIError(\n\u001b[1;32m 99\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mThe api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 100\u001b[0m )\n\u001b[1;32m 101\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mapi_key \u001b[38;5;241m=\u001b[39m api_key\n\u001b[1;32m 103\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m organization \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n", - "\u001b[0;31mOpenAIError\u001b[0m: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable" + "name": "stdout", + "output_type": "stream", + "text": [ + "Question: What is the nationality of the chef and restaurateur featured in Restaurant: Impossible?\n", + "Predicted Answer: American\n" ] } ], @@ -389,7 +354,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -434,7 +399,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -482,7 +447,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -520,7 +485,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -553,7 +518,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ @@ -579,7 +544,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ @@ -625,7 +590,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -676,7 +641,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -711,7 +676,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 17, "metadata": {}, "outputs": [ { @@ -861,7 +826,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 18, "metadata": {}, "outputs": [ { @@ -894,7 +859,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 19, "metadata": {}, "outputs": [ { @@ -1052,7 +1017,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "metadata": {}, "outputs": [ { @@ -1218,7 +1183,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, "metadata": {}, "outputs": [], "source": [ @@ -1241,7 +1206,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 22, "metadata": {}, "outputs": [], "source": [ @@ -1305,7 +1270,7 @@ }, { "cell_type": "code", - 
"execution_count": null, + "execution_count": 23, "metadata": {}, "outputs": [ { @@ -1341,7 +1306,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 24, "metadata": {}, "outputs": [ { @@ -1472,7 +1437,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 25, "metadata": {}, "outputs": [], "source": [ @@ -1497,7 +1462,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 26, "metadata": {}, "outputs": [ { @@ -1528,28 +1493,16 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 27, "metadata": {}, - "outputs": [ - { - "ename": "NameError", - "evalue": "name 'evaluate_on_hotpotqa' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[1], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m uncompiled_baleen_retrieval_score \u001b[38;5;241m=\u001b[39m \u001b[43mevaluate_on_hotpotqa\u001b[49m(uncompiled_baleen, metric\u001b[38;5;241m=\u001b[39mgold_passages_retrieved, display\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)\n", - "\u001b[0;31mNameError\u001b[0m: name 'evaluate_on_hotpotqa' is not defined" - ] - } - ], + "outputs": [], "source": [ "uncompiled_baleen_retrieval_score = evaluate_on_hotpotqa(uncompiled_baleen, metric=gold_passages_retrieved, display=False)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 28, "metadata": {}, "outputs": [ { @@ -1679,7 +1632,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 29, "metadata": {}, "outputs": [ { @@ -1718,7 +1671,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 30, "metadata": {}, "outputs": [ { @@ -1998,7 +1951,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.8" + "version": "3.9.17" }, "orig_nbformat": 4 },