diff --git "a/.\"git grep -n -- '*.py' \"from agents.\"" "b/.\"git grep -n -- '*.py' \"from agents.\"" new file mode 100644 index 00000000..d92b872e --- /dev/null +++ "b/.\"git grep -n -- '*.py' \"from agents.\"" @@ -0,0 +1,127 @@ +docs/handoffs.md:88:from agents.extensions import handoff_filters +docs/handoffs.md:106:from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX +docs/ja/handoffs.md:88:from agents.extensions import handoff_filters +docs/ja/handoffs.md:106:from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX +docs/ja/visualization.md:25:from agents.extensions.visualization import draw_graph +docs/ja/voice/quickstart.md:57:from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +docs/ja/voice/quickstart.md:94:from agents.voice import SingleAgentVoiceWorkflow, VoicePipeline +docs/ja/voice/quickstart.md:103:from agents.voice import AudioInput +docs/ja/voice/quickstart.md:137:from agents.voice import ( +docs/ja/voice/quickstart.md:142:from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +docs/visualization.md:25:from agents.extensions.visualization import draw_graph +docs/voice/quickstart.md:57:from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +docs/voice/quickstart.md:94:from agents.voice import SingleAgentVoiceWorkflow, VoicePipeline +docs/voice/quickstart.md:103:from agents.voice import AudioInput +docs/voice/quickstart.md:137:from agents.voice import ( +docs/voice/quickstart.md:142:from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +examples/customer_service/main.py:23:from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX +examples/financial_research_agent/agents/search_agent.py:2:from agents.model_settings import ModelSettings +examples/handoffs/message_filter.py:7:from agents.extensions import handoff_filters +examples/handoffs/message_filter_streaming.py:7:from agents.extensions import handoff_filters +examples/mcp/filesystem_example/main.py:6:from agents.mcp import MCPServer, MCPServerStdio +examples/mcp/git_example/main.py:5:from agents.mcp import MCPServer, MCPServerStdio +examples/mcp/sse_example/main.py:9:from agents.mcp import MCPServer, MCPServerSse +examples/mcp/sse_example/main.py:10:from agents.model_settings import ModelSettings +examples/research_bot/agents/search_agent.py:2:from agents.model_settings import ModelSettings +examples/voice/static/main.py:7:from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +examples/voice/static/main.py:8:from agents.voice import ( +examples/voice/streamed/main.py:15:from agents.voice import StreamedAudioInput, VoicePipeline +examples/voice/streamed/my_workflow.py:6:from agents.extensions.handoff_prompt import prompt_with_handoff_instructions +examples/voice/streamed/my_workflow.py:7:from agents.voice import VoiceWorkflowBase, VoiceWorkflowHelper +src/app/agent_server.py:13:from agents.profilebuilder_agent import profilebuilder_agent +src/app/agent_server.py:14:from agents.profilebuilder import router as profilebuilder_router +src/app/profilebuilder.py:3:from agents.profilebuilder_agent import profilebuilder_agent +tests/conftest.py:5:from agents.models import _openai_shared +tests/conftest.py:6:from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel +tests/conftest.py:7:from agents.models.openai_responses import OpenAIResponsesModel +tests/conftest.py:8:from agents.tracing import set_trace_processors +tests/conftest.py:9:from 
agents.tracing.setup import GLOBAL_TRACE_PROVIDER +tests/fake_model.py:8:from agents.agent_output import AgentOutputSchema +tests/fake_model.py:9:from agents.handoffs import Handoff +tests/fake_model.py:10:from agents.items import ( +tests/fake_model.py:16:from agents.model_settings import ModelSettings +tests/fake_model.py:17:from agents.models.interface import Model, ModelTracing +tests/fake_model.py:18:from agents.tool import Tool +tests/fake_model.py:19:from agents.tracing import SpanError, generation_span +tests/fake_model.py:20:from agents.usage import Usage +tests/mcp/helpers.py:8:from agents.mcp import MCPServer +tests/mcp/test_caching.py:6:from agents.mcp import MCPServerStdio +tests/mcp/test_connect_disconnect.py:6:from agents.mcp import MCPServerStdio +tests/mcp/test_mcp_util.py:10:from agents.exceptions import AgentsException, ModelBehaviorError +tests/mcp/test_mcp_util.py:11:from agents.mcp import MCPServer, MCPUtil +tests/mcp/test_server_errors.py:3:from agents.exceptions import UserError +tests/mcp/test_server_errors.py:4:from agents.mcp.server import _MCPServerWithClientSession +tests/test_agent_hooks.py:10:from agents.agent import Agent +tests/test_agent_hooks.py:11:from agents.lifecycle import AgentHooks +tests/test_agent_hooks.py:12:from agents.run import Runner +tests/test_agent_hooks.py:13:from agents.run_context import RunContextWrapper, TContext +tests/test_agent_hooks.py:14:from agents.tool import Tool +tests/test_agent_runner.py:26:from agents.agent import ToolsToFinalOutputResult +tests/test_agent_runner.py:27:from agents.tool import FunctionToolResult, function_tool +tests/test_agent_runner_streamed.py:23:from agents.items import RunItem +tests/test_agent_runner_streamed.py:24:from agents.run import RunConfig +tests/test_agent_runner_streamed.py:25:from agents.stream_events import AgentUpdatedStreamEvent +tests/test_computer_action.py:34:from agents._run_impl import ComputerAction, ToolRunComputerAction +tests/test_computer_action.py:35:from agents.items import ToolCallOutputItem +tests/test_config.py:7:from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel +tests/test_config.py:8:from agents.models.openai_provider import OpenAIProvider +tests/test_config.py:9:from agents.models.openai_responses import OpenAIResponsesModel +tests/test_doc_parsing.py:1:from agents.function_schema import generate_func_documentation +tests/test_extension_filters.py:4:from agents.extensions.handoff_filters import remove_all_tools +tests/test_extension_filters.py:5:from agents.items import ( +tests/test_function_schema.py:10:from agents.exceptions import UserError +tests/test_function_schema.py:11:from agents.function_schema import function_schema +tests/test_function_tool.py:9:from agents.tool import default_tool_error_function +tests/test_function_tool_decorator.py:9:from agents.run_context import RunContextWrapper +tests/test_guardrails.py:16:from agents.guardrail import input_guardrail, output_guardrail +tests/test_openai_chatcompletions.py:33:from agents.models.fake_id import FAKE_RESPONSES_ID +tests/test_openai_chatcompletions.py:34:from agents.models.openai_chatcompletions import _Converter +tests/test_openai_chatcompletions_converter.py:41:from agents.agent_output import AgentOutputSchema +tests/test_openai_chatcompletions_converter.py:42:from agents.exceptions import UserError +tests/test_openai_chatcompletions_converter.py:43:from agents.items import TResponseInputItem +tests/test_openai_chatcompletions_converter.py:44:from agents.models.fake_id import 
FAKE_RESPONSES_ID +tests/test_openai_chatcompletions_converter.py:45:from agents.models.openai_chatcompletions import _Converter +tests/test_openai_chatcompletions_stream.py:20:from agents.model_settings import ModelSettings +tests/test_openai_chatcompletions_stream.py:21:from agents.models.interface import ModelTracing +tests/test_openai_chatcompletions_stream.py:22:from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel +tests/test_openai_chatcompletions_stream.py:23:from agents.models.openai_provider import OpenAIProvider +tests/test_openai_responses_converter.py:43:from agents.models.openai_responses import Converter +tests/test_output_tool.py:8:from agents.agent_output import _WRAPPER_DICT_KEY +tests/test_output_tool.py:9:from agents.util import _json +tests/test_pretty_print.py:8:from agents.agent_output import _WRAPPER_DICT_KEY +tests/test_pretty_print.py:9:from agents.util._pretty_print import pretty_print_result, pretty_print_run_result_streaming +tests/test_responses_tracing.py:7:from agents.tracing.span_data import ResponseSpanData +tests/test_run_config.py:6:from agents.models.interface import Model, ModelProvider +tests/test_run_step_execution.py:22:from agents._run_impl import ( +tests/test_run_step_processing.py:26:from agents._run_impl import RunImpl +tests/test_strict_schema.py:3:from agents.exceptions import UserError +tests/test_strict_schema.py:4:from agents.strict_schema import ensure_strict_json_schema +tests/test_tool_choice_reset.py:4:from agents._run_impl import AgentToolUseTracker, RunImpl +tests/test_tool_converter.py:5:from agents.exceptions import UserError +tests/test_tool_converter.py:6:from agents.models.openai_chatcompletions import ToolConverter +tests/test_tool_converter.py:7:from agents.tool import FileSearchTool, WebSearchTool +tests/test_tool_use_behavior.py:19:from agents._run_impl import RunImpl +tests/test_trace_processor.py:8:from agents.tracing.processor_interface import TracingProcessor +tests/test_trace_processor.py:9:from agents.tracing.processors import BackendSpanExporter, BatchTraceProcessor +tests/test_trace_processor.py:10:from agents.tracing.span_data import AgentSpanData +tests/test_trace_processor.py:11:from agents.tracing.spans import SpanImpl +tests/test_trace_processor.py:12:from agents.tracing.traces import TraceImpl +tests/test_tracing.py:9:from agents.tracing import ( +tests/test_tracing.py:19:from agents.tracing.spans import SpanError +tests/test_visualization.py:7:from agents.extensions.visualization import ( +tests/test_visualization.py:13:from agents.handoffs import Handoff +tests/testing_processor.py:7:from agents.tracing import Span, Trace, TracingProcessor +tests/tracing/test_processor_api_key.py:3:from agents.tracing.processors import BackendSpanExporter +tests/voice/fake_models.py:10: from agents.voice import ( +tests/voice/helpers.py:2: from agents.voice import StreamedAudioResult +tests/voice/test_input.py:9: from agents.voice import AudioInput, StreamedAudioInput +tests/voice/test_input.py:10: from agents.voice.input import DEFAULT_SAMPLE_RATE, _buffer_to_audio_file +tests/voice/test_openai_stt.py:12: from agents.voice import OpenAISTTTranscriptionSession, StreamedAudioInput, STTModelSettings +tests/voice/test_openai_stt.py:13: from agents.voice.exceptions import STTWebsocketConnectionError +tests/voice/test_openai_stt.py:14: from agents.voice.models.openai_stt import EVENT_INACTIVITY_TIMEOUT +tests/voice/test_openai_tts.py:9: from agents.voice import OpenAITTSModel, TTSModelSettings 
+tests/voice/test_pipeline.py:8: from agents.voice import AudioInput, TTSModelSettings, VoicePipeline, VoicePipelineConfig +tests/voice/test_workflow.py:12:from agents.agent_output import AgentOutputSchema +tests/voice/test_workflow.py:13:from agents.handoffs import Handoff +tests/voice/test_workflow.py:14:from agents.items import ( +tests/voice/test_workflow.py:22: from agents.voice import SingleAgentVoiceWorkflow diff --git a/.env.sample b/.env.sample new file mode 100644 index 00000000..08e8ddf6 --- /dev/null +++ b/.env.sample @@ -0,0 +1,10 @@ +# Bubble Web‑hook endpoints +BUBBLE_STRUCTURED_URL= "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_return_output" +BUBBLE_CHAT_URL= "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_chat_response" + +# (Optional) task‑specific override +BUBBLE_PROFILE_WEBHOOK= "https://helpmeaiai.bubbleapps.io/version-test/api/1.1/wf/openai_profile_output" + +# OpenAI, DB, etc. +OPENAI_API_KEY= "YOUR_OPENAI_KEY_HERE" +DATABASE_URL= diff --git a/.gitignore b/.gitignore index 7dd22b88..34252901 100644 --- a/.gitignore +++ b/.gitignore @@ -141,4 +141,5 @@ cython_debug/ .ruff_cache/ # PyPI configuration file -.pypirc \ No newline at end of file +.pypircopenai_agents/ +openai_agents/ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..d1f194a8 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "vendor/openai-agents-python"] + path = vendor/openai-agents-python + url = https://github.com/openai/openai-agents-python.git diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 00000000..65655143 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,1434 @@ +# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. + +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "anyio" +version = "4.9.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"}, + {file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} + +[package.extras] +doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] +test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] +trio = ["trio (>=0.26.1)"] + +[[package]] +name = "certifi" +version = "2025.1.31" +description = "Python package for providing Mozilla's CA Bundle." 
+optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, + {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.1" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, + {file = 
"charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash 
= "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, + {file = 
"charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, + {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, + {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, +] + +[[package]] +name = "click" +version = "8.1.8" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main"] +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "distro" +version = "1.9.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, + {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +groups = ["main"] +markers = "python_version < \"3.11\"" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "fastapi" +version = "0.115.12" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "fastapi-0.115.12-py3-none-any.whl", hash = "sha256:e94613d6c05e27be7ffebdd6ea5f388112e5e430c8f7d6494a9d1d88d43e814d"}, + {file = "fastapi-0.115.12.tar.gz", hash = "sha256:1e2c2a2646905f9e83d32f04a3f86aff4a286669c6c950ca95b5fd68c2602681"}, +] + +[package.dependencies] +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" +starlette = ">=0.40.0,<0.47.0" +typing-extensions = ">=4.8.0" + +[package.extras] +all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] +standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] + +[[package]] +name = "ghp-import" +version = "2.1.0" +description = "Copy your docs directly to the gh-pages branch." 
+optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, + {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, +] + +[package.dependencies] +python-dateutil = ">=2.8.1" + +[package.extras] +dev = ["flake8", "markdown", "twine", "wheel"] + +[[package]] +name = "graphviz" +version = "0.20.3" +description = "Simple Python interface for Graphviz" +optional = true +python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"viz\"" +files = [ + {file = "graphviz-0.20.3-py3-none-any.whl", hash = "sha256:81f848f2904515d8cd359cc611faba817598d2feaac4027b266aa3eda7b3dde5"}, + {file = "graphviz-0.20.3.zip", hash = "sha256:09d6bc81e6a9fa392e7ba52135a9d49f1ed62526f96499325930e87ca1b5925d"}, +] + +[package.extras] +dev = ["flake8", "pep8-naming", "tox (>=3)", "twine", "wheel"] +docs = ["sphinx (>=5,<7)", "sphinx-autodoc-typehints", "sphinx-rtd-theme"] +test = ["coverage", "pytest (>=7,<8.1)", "pytest-cov", "pytest-mock (>=3)"] + +[[package]] +name = "griffe" +version = "1.7.2" +description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "griffe-1.7.2-py3-none-any.whl", hash = "sha256:1ed9c2e338a75741fc82083fe5a1bc89cb6142efe126194cc313e34ee6af5423"}, + {file = "griffe-1.7.2.tar.gz", hash = "sha256:98d396d803fab3b680c2608f300872fd57019ed82f0672f5b5323a9ad18c540c"}, +] + +[package.dependencies] +colorama = ">=0.4" + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = "1.0.8" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "httpcore-1.0.8-py3-none-any.whl", hash = "sha256:5254cf149bcb5f75e9d1b2b9f729ea4a4b883d1ad7379fc632b727cec23674be"}, + {file = "httpcore-1.0.8.tar.gz", hash = "sha256:86e94505ed24ea06514883fd44d2bc02d90e77e7979c8eb71b90f41d364a1bad"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] + +[[package]] +name = "httpx" +version = "0.28.1" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" + +[package.extras] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "httpx-sse" +version = "0.4.0" +description = "Consume Server-Sent Event (SSE) messages with HTTPX." +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, + {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, +] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "importlib-metadata" +version = "8.6.1" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version == \"3.9\"" +files = [ + {file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"}, + {file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"}, +] + +[package.dependencies] +zipp = ">=3.20" + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] + +[[package]] +name = "jinja2" +version = "3.1.6" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jiter" +version = "0.9.0" +description = "Fast iterable JSON parser." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "jiter-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:816ec9b60fdfd1fec87da1d7ed46c66c44ffec37ab2ef7de5b147b2fce3fd5ad"}, + {file = "jiter-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b1d3086f8a3ee0194ecf2008cf81286a5c3e540d977fa038ff23576c023c0ea"}, + {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1339f839b91ae30b37c409bf16ccd3dc453e8b8c3ed4bd1d6a567193651a4a51"}, + {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ffba79584b3b670fefae66ceb3a28822365d25b7bf811e030609a3d5b876f538"}, + {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cfc7d0a8e899089d11f065e289cb5b2daf3d82fbe028f49b20d7b809193958d"}, + {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e00a1a2bbfaaf237e13c3d1592356eab3e9015d7efd59359ac8b51eb56390a12"}, + {file = "jiter-0.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1d9870561eb26b11448854dce0ff27a9a27cb616b632468cafc938de25e9e51"}, + {file = "jiter-0.9.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9872aeff3f21e437651df378cb75aeb7043e5297261222b6441a620218b58708"}, + {file = "jiter-0.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1fd19112d1049bdd47f17bfbb44a2c0001061312dcf0e72765bfa8abd4aa30e5"}, + {file = "jiter-0.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6ef5da104664e526836070e4a23b5f68dec1cc673b60bf1edb1bfbe8a55d0678"}, + {file = "jiter-0.9.0-cp310-cp310-win32.whl", hash = "sha256:cb12e6d65ebbefe5518de819f3eda53b73187b7089040b2d17f5b39001ff31c4"}, + {file = "jiter-0.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:c43ca669493626d8672be3b645dbb406ef25af3f4b6384cfd306da7eb2e70322"}, + {file = "jiter-0.9.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6c4d99c71508912a7e556d631768dcdef43648a93660670986916b297f1c54af"}, + {file = "jiter-0.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8f60fb8ce7df529812bf6c625635a19d27f30806885139e367af93f6e734ef58"}, + {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51c4e1a4f8ea84d98b7b98912aa4290ac3d1eabfde8e3c34541fae30e9d1f08b"}, + {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f4c677c424dc76684fea3e7285a7a2a7493424bea89ac441045e6a1fb1d7b3b"}, + {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2221176dfec87f3470b21e6abca056e6b04ce9bff72315cb0b243ca9e835a4b5"}, + {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3c7adb66f899ffa25e3c92bfcb593391ee1947dbdd6a9a970e0d7e713237d572"}, + {file = "jiter-0.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c98d27330fdfb77913c1097a7aab07f38ff2259048949f499c9901700789ac15"}, + {file = "jiter-0.9.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eda3f8cc74df66892b1d06b5d41a71670c22d95a1ca2cbab73654745ce9d0419"}, + {file = "jiter-0.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dd5ab5ddc11418dce28343123644a100f487eaccf1de27a459ab36d6cca31043"}, + {file = "jiter-0.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:42f8a68a69f047b310319ef8e2f52fdb2e7976fb3313ef27df495cf77bcad965"}, + {file = "jiter-0.9.0-cp311-cp311-win32.whl", hash = 
"sha256:a25519efb78a42254d59326ee417d6f5161b06f5da827d94cf521fed961b1ff2"}, + {file = "jiter-0.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:923b54afdd697dfd00d368b7ccad008cccfeb1efb4e621f32860c75e9f25edbd"}, + {file = "jiter-0.9.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7b46249cfd6c48da28f89eb0be3f52d6fdb40ab88e2c66804f546674e539ec11"}, + {file = "jiter-0.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:609cf3c78852f1189894383cf0b0b977665f54cb38788e3e6b941fa6d982c00e"}, + {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d726a3890a54561e55a9c5faea1f7655eda7f105bd165067575ace6e65f80bb2"}, + {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2e89dc075c1fef8fa9be219e249f14040270dbc507df4215c324a1839522ea75"}, + {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04e8ffa3c353b1bc4134f96f167a2082494351e42888dfcf06e944f2729cbe1d"}, + {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:203f28a72a05ae0e129b3ed1f75f56bc419d5f91dfacd057519a8bd137b00c42"}, + {file = "jiter-0.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fca1a02ad60ec30bb230f65bc01f611c8608b02d269f998bc29cca8619a919dc"}, + {file = "jiter-0.9.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:237e5cee4d5d2659aaf91bbf8ec45052cc217d9446070699441a91b386ae27dc"}, + {file = "jiter-0.9.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:528b6b71745e7326eed73c53d4aa57e2a522242320b6f7d65b9c5af83cf49b6e"}, + {file = "jiter-0.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9f48e86b57bc711eb5acdfd12b6cb580a59cc9a993f6e7dcb6d8b50522dcd50d"}, + {file = "jiter-0.9.0-cp312-cp312-win32.whl", hash = "sha256:699edfde481e191d81f9cf6d2211debbfe4bd92f06410e7637dffb8dd5dfde06"}, + {file = "jiter-0.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:099500d07b43f61d8bd780466d429c45a7b25411b334c60ca875fa775f68ccb0"}, + {file = "jiter-0.9.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:2764891d3f3e8b18dce2cff24949153ee30c9239da7c00f032511091ba688ff7"}, + {file = "jiter-0.9.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:387b22fbfd7a62418d5212b4638026d01723761c75c1c8232a8b8c37c2f1003b"}, + {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d8da8629ccae3606c61d9184970423655fb4e33d03330bcdfe52d234d32f69"}, + {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1be73d8982bdc278b7b9377426a4b44ceb5c7952073dd7488e4ae96b88e1103"}, + {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2228eaaaa111ec54b9e89f7481bffb3972e9059301a878d085b2b449fbbde635"}, + {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:11509bfecbc319459647d4ac3fd391d26fdf530dad00c13c4dadabf5b81f01a4"}, + {file = "jiter-0.9.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f22238da568be8bbd8e0650e12feeb2cfea15eda4f9fc271d3b362a4fa0604d"}, + {file = "jiter-0.9.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17f5d55eb856597607562257c8e36c42bc87f16bef52ef7129b7da11afc779f3"}, + {file = "jiter-0.9.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:6a99bed9fbb02f5bed416d137944419a69aa4c423e44189bc49718859ea83bc5"}, + {file = "jiter-0.9.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = 
"sha256:e057adb0cd1bd39606100be0eafe742de2de88c79df632955b9ab53a086b3c8d"}, + {file = "jiter-0.9.0-cp313-cp313-win32.whl", hash = "sha256:f7e6850991f3940f62d387ccfa54d1a92bd4bb9f89690b53aea36b4364bcab53"}, + {file = "jiter-0.9.0-cp313-cp313-win_amd64.whl", hash = "sha256:c8ae3bf27cd1ac5e6e8b7a27487bf3ab5f82318211ec2e1346a5b058756361f7"}, + {file = "jiter-0.9.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f0b2827fb88dda2cbecbbc3e596ef08d69bda06c6f57930aec8e79505dc17001"}, + {file = "jiter-0.9.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062b756ceb1d40b0b28f326cba26cfd575a4918415b036464a52f08632731e5a"}, + {file = "jiter-0.9.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6f7838bc467ab7e8ef9f387bd6de195c43bad82a569c1699cb822f6609dd4cdf"}, + {file = "jiter-0.9.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4a2d16360d0642cd68236f931b85fe50288834c383492e4279d9f1792e309571"}, + {file = "jiter-0.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e84ed1c9c9ec10bbb8c37f450077cbe3c0d4e8c2b19f0a49a60ac7ace73c7452"}, + {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f3c848209ccd1bfa344a1240763975ca917de753c7875c77ec3034f4151d06c"}, + {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7825f46e50646bee937e0f849d14ef3a417910966136f59cd1eb848b8b5bb3e4"}, + {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d82a811928b26d1a6311a886b2566f68ccf2b23cf3bfed042e18686f1f22c2d7"}, + {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c058ecb51763a67f019ae423b1cbe3fa90f7ee6280c31a1baa6ccc0c0e2d06e"}, + {file = "jiter-0.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9897115ad716c48f0120c1f0c4efae348ec47037319a6c63b2d7838bb53aaef4"}, + {file = "jiter-0.9.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:351f4c90a24c4fb8c87c6a73af2944c440494ed2bea2094feecacb75c50398ae"}, + {file = "jiter-0.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d45807b0f236c485e1e525e2ce3a854807dfe28ccf0d013dd4a563395e28008a"}, + {file = "jiter-0.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1537a890724ba00fdba21787010ac6f24dad47f763410e9e1093277913592784"}, + {file = "jiter-0.9.0-cp38-cp38-win32.whl", hash = "sha256:e3630ec20cbeaddd4b65513fa3857e1b7c4190d4481ef07fb63d0fad59033321"}, + {file = "jiter-0.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:2685f44bf80e95f8910553bf2d33b9c87bf25fceae6e9f0c1355f75d2922b0ee"}, + {file = "jiter-0.9.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9ef340fae98065071ccd5805fe81c99c8f80484e820e40043689cf97fb66b3e2"}, + {file = "jiter-0.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:efb767d92c63b2cd9ec9f24feeb48f49574a713870ec87e9ba0c2c6e9329c3e2"}, + {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:113f30f87fb1f412510c6d7ed13e91422cfd329436364a690c34c8b8bd880c42"}, + {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8793b6df019b988526f5a633fdc7456ea75e4a79bd8396a3373c371fc59f5c9b"}, + {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7a9aaa5102dba4e079bb728076fadd5a2dca94c05c04ce68004cfd96f128ea34"}, + {file = "jiter-0.9.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d838650f6ebaf4ccadfb04522463e74a4c378d7e667e0eb1865cfe3990bfac49"}, + {file = 
"jiter-0.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0194f813efdf4b8865ad5f5c5f50f8566df7d770a82c51ef593d09e0b347020"}, + {file = "jiter-0.9.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a7954a401d0a8a0b8bc669199db78af435aae1e3569187c2939c477c53cb6a0a"}, + {file = "jiter-0.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4feafe787eb8a8d98168ab15637ca2577f6ddf77ac6c8c66242c2d028aa5420e"}, + {file = "jiter-0.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:27cd1f2e8bb377f31d3190b34e4328d280325ad7ef55c6ac9abde72f79e84d2e"}, + {file = "jiter-0.9.0-cp39-cp39-win32.whl", hash = "sha256:161d461dcbe658cf0bd0aa375b30a968b087cdddc624fc585f3867c63c6eca95"}, + {file = "jiter-0.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:e8b36d8a16a61993be33e75126ad3d8aa29cf450b09576f3c427d27647fcb4aa"}, + {file = "jiter-0.9.0.tar.gz", hash = "sha256:aadba0964deb424daa24492abc3d229c60c4a31bfee205aedbf1acc7639d7893"}, +] + +[[package]] +name = "markdown" +version = "3.8" +description = "Python implementation of John Gruber's Markdown." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "markdown-3.8-py3-none-any.whl", hash = "sha256:794a929b79c5af141ef5ab0f2f642d0f7b1872981250230e72682346f7cc90dc"}, + {file = "markdown-3.8.tar.gz", hash = "sha256:7df81e63f0df5c4b24b7d156eb81e4690595239b7d70937d0409f1b0de319c6f"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["mdx_gh_links (>=0.2)", "mkdocs (>=1.6)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +testing = ["coverage", "pyyaml"] + +[[package]] +name = "markupsafe" +version = "3.0.2" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = 
"MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = 
"MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "mcp" +version = "1.6.0" +description = "Model Context Protocol SDK" +optional = false +python-versions = ">=3.10" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "mcp-1.6.0-py3-none-any.whl", hash = "sha256:7bd24c6ea042dbec44c754f100984d186620d8b841ec30f1b19eda9b93a634d0"}, + {file = "mcp-1.6.0.tar.gz", hash = "sha256:d9324876de2c5637369f43161cd71eebfd803df5a95e46225cab8d280e366723"}, +] + +[package.dependencies] +anyio = ">=4.5" +httpx = ">=0.27" +httpx-sse = ">=0.4" +pydantic = ">=2.7.2,<3.0.0" +pydantic-settings = ">=2.5.2" +sse-starlette = ">=1.6.1" +starlette = ">=0.27" +uvicorn = ">=0.23.1" + +[package.extras] +cli = ["python-dotenv (>=1.0.0)", "typer (>=0.12.4)"] +rich = ["rich (>=13.9.4)"] +ws = ["websockets (>=15.0.1)"] + +[[package]] +name = "mergedeep" +version = "1.3.4" +description = "A deep merge function for 🐍." 
+optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, + {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, +] + +[[package]] +name = "mkdocs" +version = "1.6.1" +description = "Project documentation with Markdown." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e"}, + {file = "mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} +ghp-import = ">=1.0" +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} +jinja2 = ">=2.11.1" +markdown = ">=3.3.6" +markupsafe = ">=2.0.1" +mergedeep = ">=1.3.4" +mkdocs-get-deps = ">=0.2.0" +packaging = ">=20.5" +pathspec = ">=0.11.1" +pyyaml = ">=5.1" +pyyaml-env-tag = ">=0.1" +watchdog = ">=2.0" + +[package.extras] +i18n = ["babel (>=2.9.0)"] +min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4) ; platform_system == \"Windows\"", "ghp-import (==1.0)", "importlib-metadata (==4.4) ; python_version < \"3.10\"", "jinja2 (==2.11.1)", "markdown (==3.3.6)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "mkdocs-get-deps (==0.2.0)", "packaging (==20.5)", "pathspec (==0.11.1)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "watchdog (==2.0)"] + +[[package]] +name = "mkdocs-get-deps" +version = "0.2.0" +description = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, + {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""} +mergedeep = ">=1.3.4" +platformdirs = ">=2.2.0" +pyyaml = ">=5.1" + +[[package]] +name = "mkdocs-static-i18n" +version = "1.3.0" +description = "MkDocs i18n plugin using static translation markdown files" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "mkdocs_static_i18n-1.3.0-py3-none-any.whl", hash = "sha256:7905d52fff71d2c108b6c344fd223e848ca7e39ddf319b70864dfa47dba85d6b"}, + {file = "mkdocs_static_i18n-1.3.0.tar.gz", hash = "sha256:65731e1e4ec6d719693e24fee9340f5516460b2b7244d2a89bed4ce3cfa6a173"}, +] + +[package.dependencies] +mkdocs = ">=1.5.2" + +[package.extras] +material = ["mkdocs-material (>=9.2.5)"] + +[[package]] +name = "numpy" +version = "2.2.5" +description = "Fundamental package for array computing in Python" +optional = true +python-versions = ">=3.10" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"voice\"" +files = [ + {file = "numpy-2.2.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1f4a922da1729f4c40932b2af4fe84909c7a6e167e6e99f71838ce3a29f3fe26"}, + {file = "numpy-2.2.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b6f91524d31b34f4a5fee24f5bc16dcd1491b668798b6d85585d836c1e633a6a"}, + {file = "numpy-2.2.5-cp310-cp310-macosx_14_0_arm64.whl", hash = 
"sha256:19f4718c9012e3baea91a7dba661dcab2451cda2550678dc30d53acb91a7290f"}, + {file = "numpy-2.2.5-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:eb7fd5b184e5d277afa9ec0ad5e4eb562ecff541e7f60e69ee69c8d59e9aeaba"}, + {file = "numpy-2.2.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6413d48a9be53e183eb06495d8e3b006ef8f87c324af68241bbe7a39e8ff54c3"}, + {file = "numpy-2.2.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7451f92eddf8503c9b8aa4fe6aa7e87fd51a29c2cfc5f7dbd72efde6c65acf57"}, + {file = "numpy-2.2.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0bcb1d057b7571334139129b7f941588f69ce7c4ed15a9d6162b2ea54ded700c"}, + {file = "numpy-2.2.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:36ab5b23915887543441efd0417e6a3baa08634308894316f446027611b53bf1"}, + {file = "numpy-2.2.5-cp310-cp310-win32.whl", hash = "sha256:422cc684f17bc963da5f59a31530b3936f57c95a29743056ef7a7903a5dbdf88"}, + {file = "numpy-2.2.5-cp310-cp310-win_amd64.whl", hash = "sha256:e4f0b035d9d0ed519c813ee23e0a733db81ec37d2e9503afbb6e54ccfdee0fa7"}, + {file = "numpy-2.2.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c42365005c7a6c42436a54d28c43fe0e01ca11eb2ac3cefe796c25a5f98e5e9b"}, + {file = "numpy-2.2.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:498815b96f67dc347e03b719ef49c772589fb74b8ee9ea2c37feae915ad6ebda"}, + {file = "numpy-2.2.5-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:6411f744f7f20081b1b4e7112e0f4c9c5b08f94b9f086e6f0adf3645f85d3a4d"}, + {file = "numpy-2.2.5-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:9de6832228f617c9ef45d948ec1cd8949c482238d68b2477e6f642c33a7b0a54"}, + {file = "numpy-2.2.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:369e0d4647c17c9363244f3468f2227d557a74b6781cb62ce57cf3ef5cc7c610"}, + {file = "numpy-2.2.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:262d23f383170f99cd9191a7c85b9a50970fe9069b2f8ab5d786eca8a675d60b"}, + {file = "numpy-2.2.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:aa70fdbdc3b169d69e8c59e65c07a1c9351ceb438e627f0fdcd471015cd956be"}, + {file = "numpy-2.2.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37e32e985f03c06206582a7323ef926b4e78bdaa6915095ef08070471865b906"}, + {file = "numpy-2.2.5-cp311-cp311-win32.whl", hash = "sha256:f5045039100ed58fa817a6227a356240ea1b9a1bc141018864c306c1a16d4175"}, + {file = "numpy-2.2.5-cp311-cp311-win_amd64.whl", hash = "sha256:b13f04968b46ad705f7c8a80122a42ae8f620536ea38cf4bdd374302926424dd"}, + {file = "numpy-2.2.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ee461a4eaab4f165b68780a6a1af95fb23a29932be7569b9fab666c407969051"}, + {file = "numpy-2.2.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ec31367fd6a255dc8de4772bd1658c3e926d8e860a0b6e922b615e532d320ddc"}, + {file = "numpy-2.2.5-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:47834cde750d3c9f4e52c6ca28a7361859fcaf52695c7dc3cc1a720b8922683e"}, + {file = "numpy-2.2.5-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:2c1a1c6ccce4022383583a6ded7bbcda22fc635eb4eb1e0a053336425ed36dfa"}, + {file = "numpy-2.2.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d75f338f5f79ee23548b03d801d28a505198297534f62416391857ea0479571"}, + {file = "numpy-2.2.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a801fef99668f309b88640e28d261991bfad9617c27beda4a3aec4f217ea073"}, + {file = "numpy-2.2.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:abe38cd8381245a7f49967a6010e77dbf3680bd3627c0fe4362dd693b404c7f8"}, + {file = "numpy-2.2.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5a0ac90e46fdb5649ab6369d1ab6104bfe5854ab19b645bf5cda0127a13034ae"}, + {file = "numpy-2.2.5-cp312-cp312-win32.whl", hash = "sha256:0cd48122a6b7eab8f06404805b1bd5856200e3ed6f8a1b9a194f9d9054631beb"}, + {file = "numpy-2.2.5-cp312-cp312-win_amd64.whl", hash = "sha256:ced69262a8278547e63409b2653b372bf4baff0870c57efa76c5703fd6543282"}, + {file = "numpy-2.2.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:059b51b658f4414fff78c6d7b1b4e18283ab5fa56d270ff212d5ba0c561846f4"}, + {file = "numpy-2.2.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:47f9ed103af0bc63182609044b0490747e03bd20a67e391192dde119bf43d52f"}, + {file = "numpy-2.2.5-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:261a1ef047751bb02f29dfe337230b5882b54521ca121fc7f62668133cb119c9"}, + {file = "numpy-2.2.5-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:4520caa3807c1ceb005d125a75e715567806fed67e315cea619d5ec6e75a4191"}, + {file = "numpy-2.2.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d14b17b9be5f9c9301f43d2e2a4886a33b53f4e6fdf9ca2f4cc60aeeee76372"}, + {file = "numpy-2.2.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ba321813a00e508d5421104464510cc962a6f791aa2fca1c97b1e65027da80d"}, + {file = "numpy-2.2.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4cbdef3ddf777423060c6f81b5694bad2dc9675f110c4b2a60dc0181543fac7"}, + {file = "numpy-2.2.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:54088a5a147ab71a8e7fdfd8c3601972751ded0739c6b696ad9cb0343e21ab73"}, + {file = "numpy-2.2.5-cp313-cp313-win32.whl", hash = "sha256:c8b82a55ef86a2d8e81b63da85e55f5537d2157165be1cb2ce7cfa57b6aef38b"}, + {file = "numpy-2.2.5-cp313-cp313-win_amd64.whl", hash = "sha256:d8882a829fd779f0f43998e931c466802a77ca1ee0fe25a3abe50278616b1471"}, + {file = "numpy-2.2.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:e8b025c351b9f0e8b5436cf28a07fa4ac0204d67b38f01433ac7f9b870fa38c6"}, + {file = "numpy-2.2.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8dfa94b6a4374e7851bbb6f35e6ded2120b752b063e6acdd3157e4d2bb922eba"}, + {file = "numpy-2.2.5-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:97c8425d4e26437e65e1d189d22dff4a079b747ff9c2788057bfb8114ce1e133"}, + {file = "numpy-2.2.5-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:352d330048c055ea6db701130abc48a21bec690a8d38f8284e00fab256dc1376"}, + {file = "numpy-2.2.5-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b4c0773b6ada798f51f0f8e30c054d32304ccc6e9c5d93d46cb26f3d385ab19"}, + {file = "numpy-2.2.5-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55f09e00d4dccd76b179c0f18a44f041e5332fd0e022886ba1c0bbf3ea4a18d0"}, + {file = "numpy-2.2.5-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:02f226baeefa68f7d579e213d0f3493496397d8f1cff5e2b222af274c86a552a"}, + {file = "numpy-2.2.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c26843fd58f65da9491165072da2cccc372530681de481ef670dcc8e27cfb066"}, + {file = "numpy-2.2.5-cp313-cp313t-win32.whl", hash = "sha256:1a161c2c79ab30fe4501d5a2bbfe8b162490757cf90b7f05be8b80bc02f7bb8e"}, + {file = "numpy-2.2.5-cp313-cp313t-win_amd64.whl", hash = "sha256:d403c84991b5ad291d3809bace5e85f4bbf44a04bdc9a88ed2bb1807b3360bb8"}, + {file = "numpy-2.2.5-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = 
"sha256:b4ea7e1cff6784e58fe281ce7e7f05036b3e1c89c6f922a6bfbc0a7e8768adbe"}, + {file = "numpy-2.2.5-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:d7543263084a85fbc09c704b515395398d31d6395518446237eac219eab9e55e"}, + {file = "numpy-2.2.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0255732338c4fdd00996c0421884ea8a3651eea555c3a56b84892b66f696eb70"}, + {file = "numpy-2.2.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d2e3bdadaba0e040d1e7ab39db73e0afe2c74ae277f5614dad53eadbecbbb169"}, + {file = "numpy-2.2.5.tar.gz", hash = "sha256:a9c0d994680cd991b1cb772e8b297340085466a6fe964bc9d4e80f5e2f43c291"}, +] + +[[package]] +name = "openai" +version = "1.75.0" +description = "The official Python library for the openai API" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "openai-1.75.0-py3-none-any.whl", hash = "sha256:fe6f932d2ded3b429ff67cc9ad118c71327db32eb9d32dd723de3acfca337125"}, + {file = "openai-1.75.0.tar.gz", hash = "sha256:fb3ea907efbdb1bcfd0c44507ad9c961afd7dce3147292b54505ecfd17be8fd1"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +jiter = ">=0.4.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tqdm = ">4" +typing-extensions = ">=4.11,<5" + +[package.extras] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] +realtime = ["websockets (>=13,<16)"] +voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"] + +[[package]] +name = "packaging" +version = "25.0" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "platformdirs" +version = "4.3.7" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "platformdirs-4.3.7-py3-none-any.whl", hash = "sha256:a03875334331946f13c549dbd8f4bac7a13a50a895a0eb1e8c6a8ace80d40a94"}, + {file = "platformdirs-4.3.7.tar.gz", hash = "sha256:eb437d586b6a0986388f0d6f74aa0cde27b48d0e3d66843640bfb6bdcdb6e351"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.14.1)"] + +[[package]] +name = "pydantic" +version = "2.11.3" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic-2.11.3-py3-none-any.whl", hash = "sha256:a082753436a07f9ba1289c6ffa01cd93db3548776088aa917cc43b63f68fa60f"}, + {file = "pydantic-2.11.3.tar.gz", hash = "sha256:7471657138c16adad9322fe3070c0116dd6c3ad8d649300e3cbdfe91f4db4ec3"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.33.1" +typing-extensions = ">=4.12.2" +typing-inspection = ">=0.4.0" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] + +[[package]] +name = "pydantic-core" +version = "2.33.1" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic_core-2.33.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3077cfdb6125cc8dab61b155fdd714663e401f0e6883f9632118ec12cf42df26"}, + {file = "pydantic_core-2.33.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8ffab8b2908d152e74862d276cf5017c81a2f3719f14e8e3e8d6b83fda863927"}, + {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5183e4f6a2d468787243ebcd70cf4098c247e60d73fb7d68d5bc1e1beaa0c4db"}, + {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:398a38d323f37714023be1e0285765f0a27243a8b1506b7b7de87b647b517e48"}, + {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87d3776f0001b43acebfa86f8c64019c043b55cc5a6a2e313d728b5c95b46969"}, + {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c566dd9c5f63d22226409553531f89de0cac55397f2ab8d97d6f06cfce6d947e"}, + {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d5f3acc81452c56895e90643a625302bd6be351e7010664151cc55b7b97f89"}, + {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d3a07fadec2a13274a8d861d3d37c61e97a816beae717efccaa4b36dfcaadcde"}, + {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f99aeda58dce827f76963ee87a0ebe75e648c72ff9ba1174a253f6744f518f65"}, + {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:902dbc832141aa0ec374f4310f1e4e7febeebc3256f00dc359a9ac3f264a45dc"}, + {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fe44d56aa0b00d66640aa84a3cbe80b7a3ccdc6f0b1ca71090696a6d4777c091"}, + {file = "pydantic_core-2.33.1-cp310-cp310-win32.whl", hash = "sha256:ed3eb16d51257c763539bde21e011092f127a2202692afaeaccb50db55a31383"}, + {file = 
"pydantic_core-2.33.1-cp310-cp310-win_amd64.whl", hash = "sha256:694ad99a7f6718c1a498dc170ca430687a39894a60327f548e02a9c7ee4b6504"}, + {file = "pydantic_core-2.33.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6e966fc3caaf9f1d96b349b0341c70c8d6573bf1bac7261f7b0ba88f96c56c24"}, + {file = "pydantic_core-2.33.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bfd0adeee563d59c598ceabddf2c92eec77abcb3f4a391b19aa7366170bd9e30"}, + {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91815221101ad3c6b507804178a7bb5cb7b2ead9ecd600041669c8d805ebd595"}, + {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9fea9c1869bb4742d174a57b4700c6dadea951df8b06de40c2fedb4f02931c2e"}, + {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d20eb4861329bb2484c021b9d9a977566ab16d84000a57e28061151c62b349a"}, + {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb935c5591573ae3201640579f30128ccc10739b45663f93c06796854405505"}, + {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c964fd24e6166420d18fb53996d8c9fd6eac9bf5ae3ec3d03015be4414ce497f"}, + {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:681d65e9011f7392db5aa002b7423cc442d6a673c635668c227c6c8d0e5a4f77"}, + {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e100c52f7355a48413e2999bfb4e139d2977a904495441b374f3d4fb4a170961"}, + {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:048831bd363490be79acdd3232f74a0e9951b11b2b4cc058aeb72b22fdc3abe1"}, + {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bdc84017d28459c00db6f918a7272a5190bec3090058334e43a76afb279eac7c"}, + {file = "pydantic_core-2.33.1-cp311-cp311-win32.whl", hash = "sha256:32cd11c5914d1179df70406427097c7dcde19fddf1418c787540f4b730289896"}, + {file = "pydantic_core-2.33.1-cp311-cp311-win_amd64.whl", hash = "sha256:2ea62419ba8c397e7da28a9170a16219d310d2cf4970dbc65c32faf20d828c83"}, + {file = "pydantic_core-2.33.1-cp311-cp311-win_arm64.whl", hash = "sha256:fc903512177361e868bc1f5b80ac8c8a6e05fcdd574a5fb5ffeac5a9982b9e89"}, + {file = "pydantic_core-2.33.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1293d7febb995e9d3ec3ea09caf1a26214eec45b0f29f6074abb004723fc1de8"}, + {file = "pydantic_core-2.33.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:99b56acd433386c8f20be5c4000786d1e7ca0523c8eefc995d14d79c7a081498"}, + {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35a5ec3fa8c2fe6c53e1b2ccc2454398f95d5393ab398478f53e1afbbeb4d939"}, + {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b172f7b9d2f3abc0efd12e3386f7e48b576ef309544ac3a63e5e9cdd2e24585d"}, + {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9097b9f17f91eea659b9ec58148c0747ec354a42f7389b9d50701610d86f812e"}, + {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc77ec5b7e2118b152b0d886c7514a4653bcb58c6b1d760134a9fab915f777b3"}, + {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d5e3d15245b08fa4a84cefc6c9222e6f37c98111c8679fbd94aa145f9a0ae23d"}, + {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef99779001d7ac2e2461d8ab55d3373fe7315caefdbecd8ced75304ae5a6fc6b"}, + {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:fc6bf8869e193855e8d91d91f6bf59699a5cdfaa47a404e278e776dd7f168b39"}, + {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:b1caa0bc2741b043db7823843e1bde8aaa58a55a58fda06083b0569f8b45693a"}, + {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ec259f62538e8bf364903a7d0d0239447059f9434b284f5536e8402b7dd198db"}, + {file = "pydantic_core-2.33.1-cp312-cp312-win32.whl", hash = "sha256:e14f369c98a7c15772b9da98987f58e2b509a93235582838bd0d1d8c08b68fda"}, + {file = "pydantic_core-2.33.1-cp312-cp312-win_amd64.whl", hash = "sha256:1c607801d85e2e123357b3893f82c97a42856192997b95b4d8325deb1cd0c5f4"}, + {file = "pydantic_core-2.33.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d13f0276806ee722e70a1c93da19748594f19ac4299c7e41237fc791d1861ea"}, + {file = "pydantic_core-2.33.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:70af6a21237b53d1fe7b9325b20e65cbf2f0a848cf77bed492b029139701e66a"}, + {file = "pydantic_core-2.33.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:282b3fe1bbbe5ae35224a0dbd05aed9ccabccd241e8e6b60370484234b456266"}, + {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b315e596282bbb5822d0c7ee9d255595bd7506d1cb20c2911a4da0b970187d3"}, + {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1dfae24cf9921875ca0ca6a8ecb4bb2f13c855794ed0d468d6abbec6e6dcd44a"}, + {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6dd8ecfde08d8bfadaea669e83c63939af76f4cf5538a72597016edfa3fad516"}, + {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f593494876eae852dc98c43c6f260f45abdbfeec9e4324e31a481d948214764"}, + {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:948b73114f47fd7016088e5186d13faf5e1b2fe83f5e320e371f035557fd264d"}, + {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e11f3864eb516af21b01e25fac915a82e9ddad3bb0fb9e95a246067398b435a4"}, + {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:549150be302428b56fdad0c23c2741dcdb5572413776826c965619a25d9c6bde"}, + {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:495bc156026efafd9ef2d82372bd38afce78ddd82bf28ef5276c469e57c0c83e"}, + {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ec79de2a8680b1a67a07490bddf9636d5c2fab609ba8c57597e855fa5fa4dacd"}, + {file = "pydantic_core-2.33.1-cp313-cp313-win32.whl", hash = "sha256:ee12a7be1742f81b8a65b36c6921022301d466b82d80315d215c4c691724986f"}, + {file = "pydantic_core-2.33.1-cp313-cp313-win_amd64.whl", hash = "sha256:ede9b407e39949d2afc46385ce6bd6e11588660c26f80576c11c958e6647bc40"}, + {file = "pydantic_core-2.33.1-cp313-cp313-win_arm64.whl", hash = "sha256:aa687a23d4b7871a00e03ca96a09cad0f28f443690d300500603bd0adba4b523"}, + {file = "pydantic_core-2.33.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:401d7b76e1000d0dd5538e6381d28febdcacb097c8d340dde7d7fc6e13e9f95d"}, + {file = 
"pydantic_core-2.33.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aeb055a42d734c0255c9e489ac67e75397d59c6fbe60d155851e9782f276a9c"}, + {file = "pydantic_core-2.33.1-cp313-cp313t-win_amd64.whl", hash = "sha256:338ea9b73e6e109f15ab439e62cb3b78aa752c7fd9536794112e14bee02c8d18"}, + {file = "pydantic_core-2.33.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5ab77f45d33d264de66e1884fca158bc920cb5e27fd0764a72f72f5756ae8bdb"}, + {file = "pydantic_core-2.33.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7aaba1b4b03aaea7bb59e1b5856d734be011d3e6d98f5bcaa98cb30f375f2ad"}, + {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fb66263e9ba8fea2aa85e1e5578980d127fb37d7f2e292773e7bc3a38fb0c7b"}, + {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3f2648b9262607a7fb41d782cc263b48032ff7a03a835581abbf7a3bec62bcf5"}, + {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:723c5630c4259400818b4ad096735a829074601805d07f8cafc366d95786d331"}, + {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d100e3ae783d2167782391e0c1c7a20a31f55f8015f3293647544df3f9c67824"}, + {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177d50460bc976a0369920b6c744d927b0ecb8606fb56858ff542560251b19e5"}, + {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3edde68d1a1f9af1273b2fe798997b33f90308fb6d44d8550c89fc6a3647cf6"}, + {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a62c3c3ef6a7e2c45f7853b10b5bc4ddefd6ee3cd31024754a1a5842da7d598d"}, + {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:c91dbb0ab683fa0cd64a6e81907c8ff41d6497c346890e26b23de7ee55353f96"}, + {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9f466e8bf0a62dc43e068c12166281c2eca72121dd2adc1040f3aa1e21ef8599"}, + {file = "pydantic_core-2.33.1-cp39-cp39-win32.whl", hash = "sha256:ab0277cedb698749caada82e5d099dc9fed3f906a30d4c382d1a21725777a1e5"}, + {file = "pydantic_core-2.33.1-cp39-cp39-win_amd64.whl", hash = "sha256:5773da0ee2d17136b1f1c6fbde543398d452a6ad2a7b54ea1033e2daa739b8d2"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c834f54f8f4640fd7e4b193f80eb25a0602bba9e19b3cd2fc7ffe8199f5ae02"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:049e0de24cf23766f12cc5cc71d8abc07d4a9deb9061b334b62093dedc7cb068"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a28239037b3d6f16916a4c831a5a0eadf856bdd6d2e92c10a0da3a59eadcf3e"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d3da303ab5f378a268fa7d45f37d7d85c3ec19769f28d2cc0c61826a8de21fe"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25626fb37b3c543818c14821afe0fd3830bc327a43953bc88db924b68c5723f1"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3ab2d36e20fbfcce8f02d73c33a8a7362980cff717926bbae030b93ae46b56c7"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = 
"sha256:2f9284e11c751b003fd4215ad92d325d92c9cb19ee6729ebd87e3250072cdcde"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:048c01eee07d37cbd066fc512b9d8b5ea88ceeb4e629ab94b3e56965ad655add"}, + {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5ccd429694cf26af7997595d627dd2637e7932214486f55b8a357edaac9dae8c"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3a371dc00282c4b84246509a5ddc808e61b9864aa1eae9ecc92bb1268b82db4a"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f59295ecc75a1788af8ba92f2e8c6eeaa5a94c22fc4d151e8d9638814f85c8fc"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08530b8ac922003033f399128505f513e30ca770527cc8bbacf75a84fcc2c74b"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae370459da6a5466978c0eacf90690cb57ec9d533f8e63e564ef3822bfa04fe"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e3de2777e3b9f4d603112f78006f4ae0acb936e95f06da6cb1a45fbad6bdb4b5"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3a64e81e8cba118e108d7126362ea30e021291b7805d47e4896e52c791be2761"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:52928d8c1b6bda03cc6d811e8923dffc87a2d3c8b3bfd2ce16471c7147a24850"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1b30d92c9412beb5ac6b10a3eb7ef92ccb14e3f2a8d7732e2d739f58b3aa7544"}, + {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f995719707e0e29f0f41a8aa3bcea6e761a36c9136104d3189eafb83f5cec5e5"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7edbc454a29fc6aeae1e1eecba4f07b63b8d76e76a748532233c4c167b4cb9ea"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ad05b683963f69a1d5d2c2bdab1274a31221ca737dbbceaa32bcb67359453cdd"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df6a94bf9452c6da9b5d76ed229a5683d0306ccb91cca8e1eea883189780d568"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7965c13b3967909a09ecc91f21d09cfc4576bf78140b988904e94f130f188396"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3f1fdb790440a34f6ecf7679e1863b825cb5ffde858a9197f851168ed08371e5"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5277aec8d879f8d05168fdd17ae811dd313b8ff894aeeaf7cd34ad28b4d77e33"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8ab581d3530611897d863d1a649fb0644b860286b4718db919bfd51ece41f10b"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0483847fa9ad5e3412265c1bd72aad35235512d9ce9d27d81a56d935ef489672"}, + {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:de9e06abe3cc5ec6a2d5f75bc99b0bdca4f5c719a5b34026f8c57efbdecd2ee3"}, + {file = "pydantic_core-2.33.1.tar.gz", hash = "sha256:bcc9c6fdb0ced789245b02b7d6603e17d1563064ddcfc36f046b61c0c05dd9df"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || 
>4.7.0" + +[[package]] +name = "pydantic-settings" +version = "2.9.1" +description = "Settings management using Pydantic" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "pydantic_settings-2.9.1-py3-none-any.whl", hash = "sha256:59b4f431b1defb26fe620c71a7d3968a710d719f5f4cdbbdb7926edeb770f6ef"}, + {file = "pydantic_settings-2.9.1.tar.gz", hash = "sha256:c509bf79d27563add44e8446233359004ed85066cd096d8b510f715e6ef5d268"}, +] + +[package.dependencies] +pydantic = ">=2.7.0" +python-dotenv = ">=0.21.0" +typing-inspection = ">=0.4.0" + +[package.extras] +aws-secrets-manager = ["boto3 (>=1.35.0)", "boto3-stubs[secretsmanager]"] +azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] +gcp-secret-manager = ["google-cloud-secret-manager (>=2.23.1)"] +toml = ["tomli (>=2.0.1)"] +yaml = ["pyyaml (>=6.0.1)"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "1.1.0" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"}, + {file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = 
"sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "pyyaml-env-tag" +version = "0.1" +description = "A custom YAML tag for referencing environment variables in YAML files. 
" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "pyyaml_env_tag-0.1-py3-none-any.whl", hash = "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"}, + {file = "pyyaml_env_tag-0.1.tar.gz", hash = "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb"}, +] + +[package.dependencies] +pyyaml = "*" + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "sse-starlette" +version = "2.2.1" +description = "SSE plugin for Starlette" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\"" +files = [ + {file = "sse_starlette-2.2.1-py3-none-any.whl", hash = "sha256:6410a3d3ba0c89e7675d4c273a301d64649c03a5ef1ca101f10b47f895fd0e99"}, + {file = "sse_starlette-2.2.1.tar.gz", hash = "sha256:54470d5f19274aeed6b2d473430b08b4b379ea851d953b11d7f1c4a2c118b419"}, +] + +[package.dependencies] +anyio = ">=4.7.0" +starlette = ">=0.41.3" + +[package.extras] +examples = ["fastapi"] +uvicorn = ["uvicorn (>=0.34.0)"] + +[[package]] +name = "starlette" +version = "0.46.2" +description = "The little ASGI library that shines." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "starlette-0.46.2-py3-none-any.whl", hash = "sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35"}, + {file = "starlette-0.46.2.tar.gz", hash = "sha256:7f7361f34eed179294600af672f565727419830b54b7b084efe44bb82d2fccd5"}, +] + +[package.dependencies] +anyio = ">=3.6.2,<5" +typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} + +[package.extras] +full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] + +[[package]] +name = "tqdm" +version = "4.67.1" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, + {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["nbval", "pytest (>=6)", "pytest-asyncio (>=0.24)", "pytest-cov", "pytest-timeout"] +discord = ["requests"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "types-requests" +version = "2.32.0.20250328" +description = "Typing stubs for requests" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"}, + {file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"}, +] + +[package.dependencies] +urllib3 = ">=2" + +[[package]] +name = "typing-extensions" +version = "4.13.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, + {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, +] + +[[package]] +name = "typing-inspection" +version = "0.4.0" +description = "Runtime typing introspection tools" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, + {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, +] + +[package.dependencies] +typing-extensions = ">=4.12.0" + +[[package]] +name = "urllib3" +version = "2.4.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, + {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "uvicorn" +version = "0.34.2" +description = "The lightning-fast ASGI server." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "uvicorn-0.34.2-py3-none-any.whl", hash = "sha256:deb49af569084536d269fe0a6d67e3754f104cf03aba7c11c40f01aadf33c403"}, + {file = "uvicorn-0.34.2.tar.gz", hash = "sha256:0e929828f6186353a80b58ea719861d2629d766293b6d19baf086ba31d4f3328"}, +] + +[package.dependencies] +click = ">=7.0" +h11 = ">=0.8" +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} + +[package.extras] +standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] + +[[package]] +name = "watchdog" +version = "6.0.0" +description = "Filesystem events monitoring" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:e6f0e77c9417e7cd62af82529b10563db3423625c5fce018430b249bf977f9e8"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:90c8e78f3b94014f7aaae121e6b909674df5b46ec24d6bebc45c44c56729af2a"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7631a77ffb1f7d2eefa4445ebbee491c720a5661ddf6df3498ebecae5ed375c"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7a0e56874cfbc4b9b05c60c8a1926fedf56324bb08cfbc188969777940aef3aa"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e6439e374fc012255b4ec786ae3c4bc838cd7309a540e5fe0952d03687d8804e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2"}, + {file = "watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a"}, + {file = "watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680"}, + {file = "watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f"}, + {file = "watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + +[[package]] +name = "websockets" +version = "15.0.1" +description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +optional = true +python-versions = ">=3.9" +groups = ["main"] +markers = "extra == \"voice\"" +files = [ + {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b"}, + {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205"}, + {file = "websockets-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a"}, + {file = "websockets-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e"}, + {file = "websockets-15.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf"}, + {file = "websockets-15.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb"}, + {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d"}, + {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9"}, + {file = "websockets-15.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c"}, + {file = "websockets-15.0.1-cp310-cp310-win32.whl", hash = "sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256"}, + {file = "websockets-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41"}, + {file = "websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431"}, + {file = "websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57"}, + {file = "websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905"}, + {file = "websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562"}, + {file = "websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792"}, + {file = "websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413"}, + {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8"}, + {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3"}, + {file = "websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf"}, + {file = "websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85"}, + {file = "websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065"}, + {file = "websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3"}, + {file = "websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665"}, + {file = "websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2"}, + {file = "websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215"}, + {file = "websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5"}, + {file = "websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65"}, + {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe"}, + {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4"}, + {file = "websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597"}, + {file = "websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9"}, + {file = "websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7"}, + {file = "websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931"}, + {file = "websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675"}, + {file = "websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151"}, + {file = "websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22"}, + {file = "websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f"}, + {file = "websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8"}, + {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375"}, + {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d"}, + {file = "websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4"}, + {file = "websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa"}, + {file = "websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561"}, + {file = "websockets-15.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5f4c04ead5aed67c8a1a20491d54cdfba5884507a48dd798ecaf13c74c4489f5"}, + {file = "websockets-15.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abdc0c6c8c648b4805c5eacd131910d2a7f6455dfd3becab248ef108e89ab16a"}, + {file = "websockets-15.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a625e06551975f4b7ea7102bc43895b90742746797e2e14b70ed61c43a90f09b"}, + {file = "websockets-15.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d591f8de75824cbb7acad4e05d2d710484f15f29d4a915092675ad3456f11770"}, + {file = "websockets-15.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:47819cea040f31d670cc8d324bb6435c6f133b8c7a19ec3d61634e62f8d8f9eb"}, + {file = "websockets-15.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac017dd64572e5c3bd01939121e4d16cf30e5d7e110a119399cf3133b63ad054"}, + {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4a9fac8e469d04ce6c25bb2610dc535235bd4aa14996b4e6dbebf5e007eba5ee"}, + {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363c6f671b761efcb30608d24925a382497c12c506b51661883c3e22337265ed"}, + {file = "websockets-15.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2034693ad3097d5355bfdacfffcbd3ef5694f9718ab7f29c29689a9eae841880"}, + {file = "websockets-15.0.1-cp39-cp39-win32.whl", hash = "sha256:3b1ac0d3e594bf121308112697cf4b32be538fb1444468fb0a6ae4feebc83411"}, + {file = "websockets-15.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7643a03db5c95c799b89b31c036d5f27eeb4d259c798e878d6937d71832b1e4"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04"}, + {file = "websockets-15.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7f493881579c90fc262d9cdbaa05a6b54b3811c2f300766748db79f098db9940"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:47b099e1f4fbc95b701b6e85768e1fcdaf1630f3cbe4765fa216596f12310e2e"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f2b6de947f8c757db2db9c71527933ad0019737ec374a8a6be9a956786aaf9"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d08eb4c2b7d6c41da6ca0600c077e93f5adcfd979cd777d747e9ee624556da4b"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b826973a4a2ae47ba357e4e82fa44a463b8f168e1ca775ac64521442b19e87f"}, + {file = "websockets-15.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:21c1fa28a6a7e3cbdc171c694398b6df4744613ce9b36b1a498e816787e28123"}, + {file = "websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f"}, + {file = "websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee"}, +] + +[[package]] +name = "zipp" +version = "3.21.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = 
">=3.9" +groups = ["main"] +markers = "python_version == \"3.9\"" +files = [ + {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, + {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + +[extras] +viz = ["graphviz"] +voice = ["numpy", "websockets"] + +[metadata] +lock-version = "2.1" +python-versions = ">=3.9" +content-hash = "7524129f4fa36aecd69feeb18bf9505b86bbd9a11805ee318203fcef26cc5f4e" diff --git a/pyproject.toml b/pyproject.toml index ca398ba2..cbb184c2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,21 +1,27 @@ [project] -name = "openai-agents" +name = "rightnow-agent-app" version = "0.0.9" -description = "OpenAI Agents SDK" +description = "OpenAI Agents SDK demo app" readme = "README.md" requires-python = ">=3.9" license = "MIT" authors = [{ name = "OpenAI", email = "support@openai.com" }] + dependencies = [ + # ---------- External deps ---------- "openai>=1.66.5", - "pydantic>=2.10, <3", - "griffe>=1.5.6, <2", - "typing-extensions>=4.12.2, <5", - "requests>=2.0, <3", - "types-requests>=2.0, <3", - "mcp>=1.6.0, <2; python_version >= '3.10'", + "openai-agents==0.0.14", # ← from PyPI, no more git install + "pydantic>=2.10,<3", + "griffe>=1.5.6,<2", + "typing-extensions>=4.12.2,<5", + "requests>=2.0,<3", + "types-requests>=2.0,<3", + "mcp>=1.6.0,<2; python_version >= '3.10'", "mkdocs-static-i18n>=1.3.0", + "fastapi>=0.110.0", + "uvicorn>=0.34.0", ] + classifiers = [ "Typing :: Typed", "Intended Audience :: Developers", @@ -24,20 +30,22 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", - "Intended Audience :: Developers", "Operating System :: OS Independent", "Topic :: Software Development :: Libraries :: Python Modules", "License :: OSI Approved :: MIT License", ] [project.urls] -Homepage = "https://github.com/openai/openai-agents-python" +Homepage = "https://github.com/openai/openai-agents-python" Repository = "https://github.com/openai/openai-agents-python" [project.optional-dependencies] -voice = ["numpy>=2.2.0, <3; python_version>='3.10'", "websockets>=15.0, <16"] -viz = ["graphviz>=0.17"] +voice = ["numpy>=2.2.0,<3; python_version>='3.10'", "websockets>=15.0,<16"] +viz = ["graphviz>=0.17"] +# ----------------------------------------------------------------- +# Dev / tooling +# ----------------------------------------------------------------- [dependency-groups] dev = [ "mypy", @@ -61,35 +69,26 @@ dev = [ "graphviz", ] -[tool.uv.workspace] -members = ["agents"] - -[tool.uv.sources] -agents = { workspace = true } - +# ----------------------------------------------------------------- +# Build configuration +# ----------------------------------------------------------------- [build-system] requires = ["hatchling"] build-backend = "hatchling.build" [tool.hatch.build.targets.wheel] -packages = ["src/agents"] - +packages = ["src/app"] # ← path 
updated +# ----------------------------------------------------------------- +# Tool configs +# ----------------------------------------------------------------- [tool.ruff] -line-length = 100 -target-version = "py39" +line-length = 100 +target-version = "py39" [tool.ruff.lint] -select = [ - "E", # pycodestyle errors - "W", # pycodestyle warnings - "F", # pyflakes - "I", # isort - "B", # flake8-bugbear - "C4", # flake8-comprehensions - "UP", # pyupgrade -] -isort = { combine-as-imports = true, known-first-party = ["agents"] } +select = ["E", "W", "F", "I", "B", "C4", "UP"] +isort = { combine-as-imports = true, known-first-party = ["app"] } # ← updated [tool.ruff.lint.pydocstyle] convention = "google" @@ -98,23 +97,22 @@ convention = "google" "examples/**/*.py" = ["E501"] [tool.mypy] -strict = true -disallow_incomplete_defs = false -disallow_untyped_defs = false -disallow_untyped_calls = false +strict = true +disallow_incomplete_defs = false +disallow_untyped_defs = false +disallow_untyped_calls = false [[tool.mypy.overrides]] module = "sounddevice.*" ignore_missing_imports = true [tool.coverage.run] -source = ["tests", "src/agents"] +source = ["tests", "src/app"] # ← updated [tool.coverage.report] show_missing = true -sort = "-Cover" -exclude_also = [ - # This is only executed while typechecking +sort = "-Cover" +exclude_also = [ "if TYPE_CHECKING:", "@abc.abstractmethod", "raise NotImplementedError", @@ -122,11 +120,10 @@ exclude_also = [ ] [tool.pytest.ini_options] -asyncio_mode = "auto" +asyncio_mode = "auto" asyncio_default_fixture_loop_scope = "session" filterwarnings = [ - # This is a warning that is expected to happen: we have an async filter that raises an exception - "ignore:coroutine 'test_async_input_filter_fails..invalid_input_filter' was never awaited:RuntimeWarning", + "ignore:coroutine .* was never awaited:RuntimeWarning", ] markers = [ "allow_call_model_methods: mark test as allowing calls to real model implementations", diff --git a/src/agents/__init__.py b/src/agents/__init__.py deleted file mode 100644 index db7d312e..00000000 --- a/src/agents/__init__.py +++ /dev/null @@ -1,252 +0,0 @@ -import logging -import sys -from typing import Literal - -from openai import AsyncOpenAI - -from . 
import _config -from .agent import Agent, ToolsToFinalOutputFunction, ToolsToFinalOutputResult -from .agent_output import AgentOutputSchema -from .computer import AsyncComputer, Button, Computer, Environment -from .exceptions import ( - AgentsException, - InputGuardrailTripwireTriggered, - MaxTurnsExceeded, - ModelBehaviorError, - OutputGuardrailTripwireTriggered, - UserError, -) -from .guardrail import ( - GuardrailFunctionOutput, - InputGuardrail, - InputGuardrailResult, - OutputGuardrail, - OutputGuardrailResult, - input_guardrail, - output_guardrail, -) -from .handoffs import Handoff, HandoffInputData, HandoffInputFilter, handoff -from .items import ( - HandoffCallItem, - HandoffOutputItem, - ItemHelpers, - MessageOutputItem, - ModelResponse, - ReasoningItem, - RunItem, - ToolCallItem, - ToolCallOutputItem, - TResponseInputItem, -) -from .lifecycle import AgentHooks, RunHooks -from .model_settings import ModelSettings -from .models.interface import Model, ModelProvider, ModelTracing -from .models.openai_chatcompletions import OpenAIChatCompletionsModel -from .models.openai_provider import OpenAIProvider -from .models.openai_responses import OpenAIResponsesModel -from .result import RunResult, RunResultStreaming -from .run import RunConfig, Runner -from .run_context import RunContextWrapper, TContext -from .stream_events import ( - AgentUpdatedStreamEvent, - RawResponsesStreamEvent, - RunItemStreamEvent, - StreamEvent, -) -from .tool import ( - ComputerTool, - FileSearchTool, - FunctionTool, - FunctionToolResult, - Tool, - WebSearchTool, - default_tool_error_function, - function_tool, -) -from .tracing import ( - AgentSpanData, - CustomSpanData, - FunctionSpanData, - GenerationSpanData, - GuardrailSpanData, - HandoffSpanData, - MCPListToolsSpanData, - Span, - SpanData, - SpanError, - SpeechGroupSpanData, - SpeechSpanData, - Trace, - TracingProcessor, - TranscriptionSpanData, - add_trace_processor, - agent_span, - custom_span, - function_span, - gen_span_id, - gen_trace_id, - generation_span, - get_current_span, - get_current_trace, - guardrail_span, - handoff_span, - mcp_tools_span, - set_trace_processors, - set_tracing_disabled, - set_tracing_export_api_key, - speech_group_span, - speech_span, - trace, - transcription_span, -) -from .usage import Usage -from .version import __version__ - - -def set_default_openai_key(key: str, use_for_tracing: bool = True) -> None: - """Set the default OpenAI API key to use for LLM requests (and optionally tracing(). This is - only necessary if the OPENAI_API_KEY environment variable is not already set. - - If provided, this key will be used instead of the OPENAI_API_KEY environment variable. - - Args: - key: The OpenAI key to use. - use_for_tracing: Whether to also use this key to send traces to OpenAI. Defaults to True - If False, you'll either need to set the OPENAI_API_KEY environment variable or call - set_tracing_export_api_key() with the API key you want to use for tracing. - """ - _config.set_default_openai_key(key, use_for_tracing) - - -def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool = True) -> None: - """Set the default OpenAI client to use for LLM requests and/or tracing. If provided, this - client will be used instead of the default OpenAI client. - - Args: - client: The OpenAI client to use. - use_for_tracing: Whether to use the API key from this client for uploading traces. 
If False, - you'll either need to set the OPENAI_API_KEY environment variable or call - set_tracing_export_api_key() with the API key you want to use for tracing. - """ - _config.set_default_openai_client(client, use_for_tracing) - - -def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> None: - """Set the default API to use for OpenAI LLM requests. By default, we will use the responses API - but you can set this to use the chat completions API instead. - """ - _config.set_default_openai_api(api) - - -def enable_verbose_stdout_logging(): - """Enables verbose logging to stdout. This is useful for debugging.""" - logger = logging.getLogger("openai.agents") - logger.setLevel(logging.DEBUG) - logger.addHandler(logging.StreamHandler(sys.stdout)) - - -__all__ = [ - "Agent", - "ToolsToFinalOutputFunction", - "ToolsToFinalOutputResult", - "Runner", - "Model", - "ModelProvider", - "ModelTracing", - "ModelSettings", - "OpenAIChatCompletionsModel", - "OpenAIProvider", - "OpenAIResponsesModel", - "AgentOutputSchema", - "Computer", - "AsyncComputer", - "Environment", - "Button", - "AgentsException", - "InputGuardrailTripwireTriggered", - "OutputGuardrailTripwireTriggered", - "MaxTurnsExceeded", - "ModelBehaviorError", - "UserError", - "InputGuardrail", - "InputGuardrailResult", - "OutputGuardrail", - "OutputGuardrailResult", - "GuardrailFunctionOutput", - "input_guardrail", - "output_guardrail", - "handoff", - "Handoff", - "HandoffInputData", - "HandoffInputFilter", - "TResponseInputItem", - "MessageOutputItem", - "ModelResponse", - "RunItem", - "HandoffCallItem", - "HandoffOutputItem", - "ToolCallItem", - "ToolCallOutputItem", - "ReasoningItem", - "ModelResponse", - "ItemHelpers", - "RunHooks", - "AgentHooks", - "RunContextWrapper", - "TContext", - "RunResult", - "RunResultStreaming", - "RunConfig", - "RawResponsesStreamEvent", - "RunItemStreamEvent", - "AgentUpdatedStreamEvent", - "StreamEvent", - "FunctionTool", - "FunctionToolResult", - "ComputerTool", - "FileSearchTool", - "Tool", - "WebSearchTool", - "function_tool", - "Usage", - "add_trace_processor", - "agent_span", - "custom_span", - "function_span", - "generation_span", - "get_current_span", - "get_current_trace", - "guardrail_span", - "handoff_span", - "set_trace_processors", - "set_tracing_disabled", - "speech_group_span", - "transcription_span", - "speech_span", - "mcp_tools_span", - "trace", - "Trace", - "TracingProcessor", - "SpanError", - "Span", - "SpanData", - "AgentSpanData", - "CustomSpanData", - "FunctionSpanData", - "GenerationSpanData", - "GuardrailSpanData", - "HandoffSpanData", - "SpeechGroupSpanData", - "SpeechSpanData", - "MCPListToolsSpanData", - "TranscriptionSpanData", - "set_default_openai_key", - "set_default_openai_client", - "set_default_openai_api", - "set_tracing_export_api_key", - "enable_verbose_stdout_logging", - "gen_trace_id", - "gen_span_id", - "default_tool_error_function", - "__version__", -] diff --git a/src/agents/_config.py b/src/agents/_config.py deleted file mode 100644 index 304cfb83..00000000 --- a/src/agents/_config.py +++ /dev/null @@ -1,26 +0,0 @@ -from openai import AsyncOpenAI -from typing_extensions import Literal - -from .models import _openai_shared -from .tracing import set_tracing_export_api_key - - -def set_default_openai_key(key: str, use_for_tracing: bool) -> None: - _openai_shared.set_default_openai_key(key) - - if use_for_tracing: - set_tracing_export_api_key(key) - - -def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool) -> None: - 
_openai_shared.set_default_openai_client(client) - - if use_for_tracing: - set_tracing_export_api_key(client.api_key) - - -def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> None: - if api == "chat_completions": - _openai_shared.set_use_responses_by_default(False) - else: - _openai_shared.set_use_responses_by_default(True) diff --git a/src/agents/_debug.py b/src/agents/_debug.py deleted file mode 100644 index 4da91be4..00000000 --- a/src/agents/_debug.py +++ /dev/null @@ -1,17 +0,0 @@ -import os - - -def _debug_flag_enabled(flag: str) -> bool: - flag_value = os.getenv(flag) - return flag_value is not None and (flag_value == "1" or flag_value.lower() == "true") - - -DONT_LOG_MODEL_DATA = _debug_flag_enabled("OPENAI_AGENTS_DONT_LOG_MODEL_DATA") -"""By default we don't log LLM inputs/outputs, to prevent exposing sensitive information. Set this -flag to enable logging them. -""" - -DONT_LOG_TOOL_DATA = _debug_flag_enabled("OPENAI_AGENTS_DONT_LOG_TOOL_DATA") -"""By default we don't log tool call inputs/outputs, to prevent exposing sensitive information. Set -this flag to enable logging them. -""" diff --git a/src/agents/_run_impl.py b/src/agents/_run_impl.py deleted file mode 100644 index 94c181b7..00000000 --- a/src/agents/_run_impl.py +++ /dev/null @@ -1,921 +0,0 @@ -from __future__ import annotations - -import asyncio -import dataclasses -import inspect -from collections.abc import Awaitable -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, cast - -from openai.types.responses import ( - ResponseComputerToolCall, - ResponseFileSearchToolCall, - ResponseFunctionToolCall, - ResponseFunctionWebSearch, - ResponseOutputMessage, -) -from openai.types.responses.response_computer_tool_call import ( - ActionClick, - ActionDoubleClick, - ActionDrag, - ActionKeypress, - ActionMove, - ActionScreenshot, - ActionScroll, - ActionType, - ActionWait, -) -from openai.types.responses.response_input_param import ComputerCallOutput -from openai.types.responses.response_reasoning_item import ResponseReasoningItem - -from .agent import Agent, ToolsToFinalOutputResult -from .agent_output import AgentOutputSchema -from .computer import AsyncComputer, Computer -from .exceptions import AgentsException, ModelBehaviorError, UserError -from .guardrail import InputGuardrail, InputGuardrailResult, OutputGuardrail, OutputGuardrailResult -from .handoffs import Handoff, HandoffInputData -from .items import ( - HandoffCallItem, - HandoffOutputItem, - ItemHelpers, - MessageOutputItem, - ModelResponse, - ReasoningItem, - RunItem, - ToolCallItem, - ToolCallOutputItem, - TResponseInputItem, -) -from .lifecycle import RunHooks -from .logger import logger -from .model_settings import ModelSettings -from .models.interface import ModelTracing -from .run_context import RunContextWrapper, TContext -from .stream_events import RunItemStreamEvent, StreamEvent -from .tool import ComputerTool, FunctionTool, FunctionToolResult, Tool -from .tracing import ( - SpanError, - Trace, - function_span, - get_current_trace, - guardrail_span, - handoff_span, - trace, -) -from .util import _coro, _error_tracing - -if TYPE_CHECKING: - from .run import RunConfig - - -class QueueCompleteSentinel: - pass - - -QUEUE_COMPLETE_SENTINEL = QueueCompleteSentinel() - -_NOT_FINAL_OUTPUT = ToolsToFinalOutputResult(is_final_output=False, final_output=None) - - -@dataclass -class AgentToolUseTracker: - agent_to_tools: list[tuple[Agent, list[str]]] = field(default_factory=list) - """Tuple of (agent, list of tools 
used). Can't use a dict because agents aren't hashable.""" - - def add_tool_use(self, agent: Agent[Any], tool_names: list[str]) -> None: - existing_data = next((item for item in self.agent_to_tools if item[0] == agent), None) - if existing_data: - existing_data[1].extend(tool_names) - else: - self.agent_to_tools.append((agent, tool_names)) - - def has_used_tools(self, agent: Agent[Any]) -> bool: - existing_data = next((item for item in self.agent_to_tools if item[0] == agent), None) - return existing_data is not None and len(existing_data[1]) > 0 - - -@dataclass -class ToolRunHandoff: - handoff: Handoff - tool_call: ResponseFunctionToolCall - - -@dataclass -class ToolRunFunction: - tool_call: ResponseFunctionToolCall - function_tool: FunctionTool - - -@dataclass -class ToolRunComputerAction: - tool_call: ResponseComputerToolCall - computer_tool: ComputerTool - - -@dataclass -class ProcessedResponse: - new_items: list[RunItem] - handoffs: list[ToolRunHandoff] - functions: list[ToolRunFunction] - computer_actions: list[ToolRunComputerAction] - tools_used: list[str] # Names of all tools used, including hosted tools - - def has_tools_to_run(self) -> bool: - # Handoffs, functions and computer actions need local processing - # Hosted tools have already run, so there's nothing to do. - return any( - [ - self.handoffs, - self.functions, - self.computer_actions, - ] - ) - - -@dataclass -class NextStepHandoff: - new_agent: Agent[Any] - - -@dataclass -class NextStepFinalOutput: - output: Any - - -@dataclass -class NextStepRunAgain: - pass - - -@dataclass -class SingleStepResult: - original_input: str | list[TResponseInputItem] - """The input items i.e. the items before run() was called. May be mutated by handoff input - filters.""" - - model_response: ModelResponse - """The model response for the current step.""" - - pre_step_items: list[RunItem] - """Items generated before the current step.""" - - new_step_items: list[RunItem] - """Items generated during this current step.""" - - next_step: NextStepHandoff | NextStepFinalOutput | NextStepRunAgain - """The next step to take.""" - - @property - def generated_items(self) -> list[RunItem]: - """Items generated during the agent run (i.e. 
everything generated after - `original_input`).""" - return self.pre_step_items + self.new_step_items - - -def get_model_tracing_impl( - tracing_disabled: bool, trace_include_sensitive_data: bool -) -> ModelTracing: - if tracing_disabled: - return ModelTracing.DISABLED - elif trace_include_sensitive_data: - return ModelTracing.ENABLED - else: - return ModelTracing.ENABLED_WITHOUT_DATA - - -class RunImpl: - @classmethod - async def execute_tools_and_side_effects( - cls, - *, - agent: Agent[TContext], - # The original input to the Runner - original_input: str | list[TResponseInputItem], - # Everything generated by Runner since the original input, but before the current step - pre_step_items: list[RunItem], - new_response: ModelResponse, - processed_response: ProcessedResponse, - output_schema: AgentOutputSchema | None, - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - ) -> SingleStepResult: - # Make a copy of the generated items - pre_step_items = list(pre_step_items) - - new_step_items: list[RunItem] = [] - new_step_items.extend(processed_response.new_items) - - # First, lets run the tool calls - function tools and computer actions - function_results, computer_results = await asyncio.gather( - cls.execute_function_tool_calls( - agent=agent, - tool_runs=processed_response.functions, - hooks=hooks, - context_wrapper=context_wrapper, - config=run_config, - ), - cls.execute_computer_actions( - agent=agent, - actions=processed_response.computer_actions, - hooks=hooks, - context_wrapper=context_wrapper, - config=run_config, - ), - ) - new_step_items.extend([result.run_item for result in function_results]) - new_step_items.extend(computer_results) - - # Second, check if there are any handoffs - if run_handoffs := processed_response.handoffs: - return await cls.execute_handoffs( - agent=agent, - original_input=original_input, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - new_response=new_response, - run_handoffs=run_handoffs, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - ) - - # Third, we'll check if the tool use should result in a final output - check_tool_use = await cls._check_for_final_output_from_tools( - agent=agent, - tool_results=function_results, - context_wrapper=context_wrapper, - config=run_config, - ) - - if check_tool_use.is_final_output: - # If the output type is str, then let's just stringify it - if not agent.output_type or agent.output_type is str: - check_tool_use.final_output = str(check_tool_use.final_output) - - if check_tool_use.final_output is None: - logger.error( - "Model returned a final output of None. Not raising an error because we assume" - "you know what you're doing." - ) - - return await cls.execute_final_output( - agent=agent, - original_input=original_input, - new_response=new_response, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - final_output=check_tool_use.final_output, - hooks=hooks, - context_wrapper=context_wrapper, - ) - - # Now we can check if the model also produced a final output - message_items = [item for item in new_step_items if isinstance(item, MessageOutputItem)] - - # We'll use the last content output as the final output - potential_final_output_text = ( - ItemHelpers.extract_last_text(message_items[-1].raw_item) if message_items else None - ) - - # There are two possibilities that lead to a final output: - # 1. Structured output schema => always leads to a final output - # 2. 
Plain text output schema => only leads to a final output if there are no tool calls - if output_schema and not output_schema.is_plain_text() and potential_final_output_text: - final_output = output_schema.validate_json(potential_final_output_text) - return await cls.execute_final_output( - agent=agent, - original_input=original_input, - new_response=new_response, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - final_output=final_output, - hooks=hooks, - context_wrapper=context_wrapper, - ) - elif ( - not output_schema or output_schema.is_plain_text() - ) and not processed_response.has_tools_to_run(): - return await cls.execute_final_output( - agent=agent, - original_input=original_input, - new_response=new_response, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - final_output=potential_final_output_text or "", - hooks=hooks, - context_wrapper=context_wrapper, - ) - else: - # If there's no final output, we can just run again - return SingleStepResult( - original_input=original_input, - model_response=new_response, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - next_step=NextStepRunAgain(), - ) - - @classmethod - def maybe_reset_tool_choice( - cls, agent: Agent[Any], tool_use_tracker: AgentToolUseTracker, model_settings: ModelSettings - ) -> ModelSettings: - """Resets tool choice to None if the agent has used tools and the agent's reset_tool_choice - flag is True.""" - - if agent.reset_tool_choice is True and tool_use_tracker.has_used_tools(agent): - return dataclasses.replace(model_settings, tool_choice=None) - - return model_settings - - @classmethod - def process_model_response( - cls, - *, - agent: Agent[Any], - all_tools: list[Tool], - response: ModelResponse, - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - ) -> ProcessedResponse: - items: list[RunItem] = [] - - run_handoffs = [] - functions = [] - computer_actions = [] - tools_used: list[str] = [] - handoff_map = {handoff.tool_name: handoff for handoff in handoffs} - function_map = {tool.name: tool for tool in all_tools if isinstance(tool, FunctionTool)} - computer_tool = next((tool for tool in all_tools if isinstance(tool, ComputerTool)), None) - - for output in response.output: - if isinstance(output, ResponseOutputMessage): - items.append(MessageOutputItem(raw_item=output, agent=agent)) - elif isinstance(output, ResponseFileSearchToolCall): - items.append(ToolCallItem(raw_item=output, agent=agent)) - tools_used.append("file_search") - elif isinstance(output, ResponseFunctionWebSearch): - items.append(ToolCallItem(raw_item=output, agent=agent)) - tools_used.append("web_search") - elif isinstance(output, ResponseReasoningItem): - items.append(ReasoningItem(raw_item=output, agent=agent)) - elif isinstance(output, ResponseComputerToolCall): - items.append(ToolCallItem(raw_item=output, agent=agent)) - tools_used.append("computer_use") - if not computer_tool: - _error_tracing.attach_error_to_current_span( - SpanError( - message="Computer tool not found", - data={}, - ) - ) - raise ModelBehaviorError( - "Model produced computer action without a computer tool." 
- ) - computer_actions.append( - ToolRunComputerAction(tool_call=output, computer_tool=computer_tool) - ) - elif not isinstance(output, ResponseFunctionToolCall): - logger.warning(f"Unexpected output type, ignoring: {type(output)}") - continue - - # At this point we know it's a function tool call - if not isinstance(output, ResponseFunctionToolCall): - continue - - tools_used.append(output.name) - - # Handoffs - if output.name in handoff_map: - items.append(HandoffCallItem(raw_item=output, agent=agent)) - handoff = ToolRunHandoff( - tool_call=output, - handoff=handoff_map[output.name], - ) - run_handoffs.append(handoff) - # Regular function tool call - else: - if output.name not in function_map: - _error_tracing.attach_error_to_current_span( - SpanError( - message="Tool not found", - data={"tool_name": output.name}, - ) - ) - raise ModelBehaviorError(f"Tool {output.name} not found in agent {agent.name}") - items.append(ToolCallItem(raw_item=output, agent=agent)) - functions.append( - ToolRunFunction( - tool_call=output, - function_tool=function_map[output.name], - ) - ) - - return ProcessedResponse( - new_items=items, - handoffs=run_handoffs, - functions=functions, - computer_actions=computer_actions, - tools_used=tools_used, - ) - - @classmethod - async def execute_function_tool_calls( - cls, - *, - agent: Agent[TContext], - tool_runs: list[ToolRunFunction], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - config: RunConfig, - ) -> list[FunctionToolResult]: - async def run_single_tool( - func_tool: FunctionTool, tool_call: ResponseFunctionToolCall - ) -> Any: - with function_span(func_tool.name) as span_fn: - if config.trace_include_sensitive_data: - span_fn.span_data.input = tool_call.arguments - try: - _, _, result = await asyncio.gather( - hooks.on_tool_start(context_wrapper, agent, func_tool), - ( - agent.hooks.on_tool_start(context_wrapper, agent, func_tool) - if agent.hooks - else _coro.noop_coroutine() - ), - func_tool.on_invoke_tool(context_wrapper, tool_call.arguments), - ) - - await asyncio.gather( - hooks.on_tool_end(context_wrapper, agent, func_tool, result), - ( - agent.hooks.on_tool_end(context_wrapper, agent, func_tool, result) - if agent.hooks - else _coro.noop_coroutine() - ), - ) - except Exception as e: - _error_tracing.attach_error_to_current_span( - SpanError( - message="Error running tool", - data={"tool_name": func_tool.name, "error": str(e)}, - ) - ) - if isinstance(e, AgentsException): - raise e - raise UserError(f"Error running tool {func_tool.name}: {e}") from e - - if config.trace_include_sensitive_data: - span_fn.span_data.output = result - return result - - tasks = [] - for tool_run in tool_runs: - function_tool = tool_run.function_tool - tasks.append(run_single_tool(function_tool, tool_run.tool_call)) - - results = await asyncio.gather(*tasks) - - return [ - FunctionToolResult( - tool=tool_run.function_tool, - output=result, - run_item=ToolCallOutputItem( - output=result, - raw_item=ItemHelpers.tool_call_output_item(tool_run.tool_call, str(result)), - agent=agent, - ), - ) - for tool_run, result in zip(tool_runs, results) - ] - - @classmethod - async def execute_computer_actions( - cls, - *, - agent: Agent[TContext], - actions: list[ToolRunComputerAction], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - config: RunConfig, - ) -> list[RunItem]: - results: list[RunItem] = [] - # Need to run these serially, because each action can affect the computer state - for action in actions: - results.append( - 
await ComputerAction.execute( - agent=agent, - action=action, - hooks=hooks, - context_wrapper=context_wrapper, - config=config, - ) - ) - - return results - - @classmethod - async def execute_handoffs( - cls, - *, - agent: Agent[TContext], - original_input: str | list[TResponseInputItem], - pre_step_items: list[RunItem], - new_step_items: list[RunItem], - new_response: ModelResponse, - run_handoffs: list[ToolRunHandoff], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - ) -> SingleStepResult: - # If there is more than one handoff, add tool responses that reject those handoffs - multiple_handoffs = len(run_handoffs) > 1 - if multiple_handoffs: - output_message = "Multiple handoffs detected, ignoring this one." - new_step_items.extend( - [ - ToolCallOutputItem( - output=output_message, - raw_item=ItemHelpers.tool_call_output_item( - handoff.tool_call, output_message - ), - agent=agent, - ) - for handoff in run_handoffs[1:] - ] - ) - - actual_handoff = run_handoffs[0] - with handoff_span(from_agent=agent.name) as span_handoff: - handoff = actual_handoff.handoff - new_agent: Agent[Any] = await handoff.on_invoke_handoff( - context_wrapper, actual_handoff.tool_call.arguments - ) - span_handoff.span_data.to_agent = new_agent.name - if multiple_handoffs: - requested_agents = [handoff.handoff.agent_name for handoff in run_handoffs] - span_handoff.set_error( - SpanError( - message="Multiple handoffs requested", - data={ - "requested_agents": requested_agents, - }, - ) - ) - - # Append a tool output item for the handoff - new_step_items.append( - HandoffOutputItem( - agent=agent, - raw_item=ItemHelpers.tool_call_output_item( - actual_handoff.tool_call, - handoff.get_transfer_message(new_agent), - ), - source_agent=agent, - target_agent=new_agent, - ) - ) - - # Execute handoff hooks - await asyncio.gather( - hooks.on_handoff( - context=context_wrapper, - from_agent=agent, - to_agent=new_agent, - ), - ( - agent.hooks.on_handoff( - context_wrapper, - agent=new_agent, - source=agent, - ) - if agent.hooks - else _coro.noop_coroutine() - ), - ) - - # If there's an input filter, filter the input for the next agent - input_filter = handoff.input_filter or ( - run_config.handoff_input_filter if run_config else None - ) - if input_filter: - logger.debug("Filtering inputs for handoff") - handoff_input_data = HandoffInputData( - input_history=tuple(original_input) - if isinstance(original_input, list) - else original_input, - pre_handoff_items=tuple(pre_step_items), - new_items=tuple(new_step_items), - ) - if not callable(input_filter): - _error_tracing.attach_error_to_span( - span_handoff, - SpanError( - message="Invalid input filter", - data={"details": "not callable()"}, - ), - ) - raise UserError(f"Invalid input filter: {input_filter}") - filtered = input_filter(handoff_input_data) - if not isinstance(filtered, HandoffInputData): - _error_tracing.attach_error_to_span( - span_handoff, - SpanError( - message="Invalid input filter result", - data={"details": "not a HandoffInputData"}, - ), - ) - raise UserError(f"Invalid input filter result: {filtered}") - - original_input = ( - filtered.input_history - if isinstance(filtered.input_history, str) - else list(filtered.input_history) - ) - pre_step_items = list(filtered.pre_handoff_items) - new_step_items = list(filtered.new_items) - - return SingleStepResult( - original_input=original_input, - model_response=new_response, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - 
next_step=NextStepHandoff(new_agent), - ) - - @classmethod - async def execute_final_output( - cls, - *, - agent: Agent[TContext], - original_input: str | list[TResponseInputItem], - new_response: ModelResponse, - pre_step_items: list[RunItem], - new_step_items: list[RunItem], - final_output: Any, - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - ) -> SingleStepResult: - # Run the on_end hooks - await cls.run_final_output_hooks(agent, hooks, context_wrapper, final_output) - - return SingleStepResult( - original_input=original_input, - model_response=new_response, - pre_step_items=pre_step_items, - new_step_items=new_step_items, - next_step=NextStepFinalOutput(final_output), - ) - - @classmethod - async def run_final_output_hooks( - cls, - agent: Agent[TContext], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - final_output: Any, - ): - await asyncio.gather( - hooks.on_agent_end(context_wrapper, agent, final_output), - agent.hooks.on_end(context_wrapper, agent, final_output) - if agent.hooks - else _coro.noop_coroutine(), - ) - - @classmethod - async def run_single_input_guardrail( - cls, - agent: Agent[Any], - guardrail: InputGuardrail[TContext], - input: str | list[TResponseInputItem], - context: RunContextWrapper[TContext], - ) -> InputGuardrailResult: - with guardrail_span(guardrail.get_name()) as span_guardrail: - result = await guardrail.run(agent, input, context) - span_guardrail.span_data.triggered = result.output.tripwire_triggered - return result - - @classmethod - async def run_single_output_guardrail( - cls, - guardrail: OutputGuardrail[TContext], - agent: Agent[Any], - agent_output: Any, - context: RunContextWrapper[TContext], - ) -> OutputGuardrailResult: - with guardrail_span(guardrail.get_name()) as span_guardrail: - result = await guardrail.run(agent=agent, agent_output=agent_output, context=context) - span_guardrail.span_data.triggered = result.output.tripwire_triggered - return result - - @classmethod - def stream_step_result_to_queue( - cls, - step_result: SingleStepResult, - queue: asyncio.Queue[StreamEvent | QueueCompleteSentinel], - ): - for item in step_result.new_step_items: - if isinstance(item, MessageOutputItem): - event = RunItemStreamEvent(item=item, name="message_output_created") - elif isinstance(item, HandoffCallItem): - event = RunItemStreamEvent(item=item, name="handoff_requested") - elif isinstance(item, HandoffOutputItem): - event = RunItemStreamEvent(item=item, name="handoff_occured") - elif isinstance(item, ToolCallItem): - event = RunItemStreamEvent(item=item, name="tool_called") - elif isinstance(item, ToolCallOutputItem): - event = RunItemStreamEvent(item=item, name="tool_output") - elif isinstance(item, ReasoningItem): - event = RunItemStreamEvent(item=item, name="reasoning_item_created") - else: - logger.warning(f"Unexpected item type: {type(item)}") - event = None - - if event: - queue.put_nowait(event) - - @classmethod - async def _check_for_final_output_from_tools( - cls, - *, - agent: Agent[TContext], - tool_results: list[FunctionToolResult], - context_wrapper: RunContextWrapper[TContext], - config: RunConfig, - ) -> ToolsToFinalOutputResult: - """Returns (i, final_output).""" - if not tool_results: - return _NOT_FINAL_OUTPUT - - if agent.tool_use_behavior == "run_llm_again": - return _NOT_FINAL_OUTPUT - elif agent.tool_use_behavior == "stop_on_first_tool": - return ToolsToFinalOutputResult( - is_final_output=True, final_output=tool_results[0].output - ) - elif 
isinstance(agent.tool_use_behavior, dict): - names = agent.tool_use_behavior.get("stop_at_tool_names", []) - for tool_result in tool_results: - if tool_result.tool.name in names: - return ToolsToFinalOutputResult( - is_final_output=True, final_output=tool_result.output - ) - return ToolsToFinalOutputResult(is_final_output=False, final_output=None) - elif callable(agent.tool_use_behavior): - if inspect.iscoroutinefunction(agent.tool_use_behavior): - return await cast( - Awaitable[ToolsToFinalOutputResult], - agent.tool_use_behavior(context_wrapper, tool_results), - ) - else: - return cast( - ToolsToFinalOutputResult, agent.tool_use_behavior(context_wrapper, tool_results) - ) - - logger.error(f"Invalid tool_use_behavior: {agent.tool_use_behavior}") - raise UserError(f"Invalid tool_use_behavior: {agent.tool_use_behavior}") - - -class TraceCtxManager: - """Creates a trace only if there is no current trace, and manages the trace lifecycle.""" - - def __init__( - self, - workflow_name: str, - trace_id: str | None, - group_id: str | None, - metadata: dict[str, Any] | None, - disabled: bool, - ): - self.trace: Trace | None = None - self.workflow_name = workflow_name - self.trace_id = trace_id - self.group_id = group_id - self.metadata = metadata - self.disabled = disabled - - def __enter__(self) -> TraceCtxManager: - current_trace = get_current_trace() - if not current_trace: - self.trace = trace( - workflow_name=self.workflow_name, - trace_id=self.trace_id, - group_id=self.group_id, - metadata=self.metadata, - disabled=self.disabled, - ) - self.trace.start(mark_as_current=True) - - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if self.trace: - self.trace.finish(reset_current=True) - - -class ComputerAction: - @classmethod - async def execute( - cls, - *, - agent: Agent[TContext], - action: ToolRunComputerAction, - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - config: RunConfig, - ) -> RunItem: - output_func = ( - cls._get_screenshot_async(action.computer_tool.computer, action.tool_call) - if isinstance(action.computer_tool.computer, AsyncComputer) - else cls._get_screenshot_sync(action.computer_tool.computer, action.tool_call) - ) - - _, _, output = await asyncio.gather( - hooks.on_tool_start(context_wrapper, agent, action.computer_tool), - ( - agent.hooks.on_tool_start(context_wrapper, agent, action.computer_tool) - if agent.hooks - else _coro.noop_coroutine() - ), - output_func, - ) - - await asyncio.gather( - hooks.on_tool_end(context_wrapper, agent, action.computer_tool, output), - ( - agent.hooks.on_tool_end(context_wrapper, agent, action.computer_tool, output) - if agent.hooks - else _coro.noop_coroutine() - ), - ) - - # TODO: don't send a screenshot every single time, use references - image_url = f"data:image/png;base64,{output}" - return ToolCallOutputItem( - agent=agent, - output=image_url, - raw_item=ComputerCallOutput( - call_id=action.tool_call.call_id, - output={ - "type": "computer_screenshot", - "image_url": image_url, - }, - type="computer_call_output", - ), - ) - - @classmethod - async def _get_screenshot_sync( - cls, - computer: Computer, - tool_call: ResponseComputerToolCall, - ) -> str: - action = tool_call.action - if isinstance(action, ActionClick): - computer.click(action.x, action.y, action.button) - elif isinstance(action, ActionDoubleClick): - computer.double_click(action.x, action.y) - elif isinstance(action, ActionDrag): - computer.drag([(p.x, p.y) for p in action.path]) - elif isinstance(action, ActionKeypress): - 
computer.keypress(action.keys) - elif isinstance(action, ActionMove): - computer.move(action.x, action.y) - elif isinstance(action, ActionScreenshot): - computer.screenshot() - elif isinstance(action, ActionScroll): - computer.scroll(action.x, action.y, action.scroll_x, action.scroll_y) - elif isinstance(action, ActionType): - computer.type(action.text) - elif isinstance(action, ActionWait): - computer.wait() - - return computer.screenshot() - - @classmethod - async def _get_screenshot_async( - cls, - computer: AsyncComputer, - tool_call: ResponseComputerToolCall, - ) -> str: - action = tool_call.action - if isinstance(action, ActionClick): - await computer.click(action.x, action.y, action.button) - elif isinstance(action, ActionDoubleClick): - await computer.double_click(action.x, action.y) - elif isinstance(action, ActionDrag): - await computer.drag([(p.x, p.y) for p in action.path]) - elif isinstance(action, ActionKeypress): - await computer.keypress(action.keys) - elif isinstance(action, ActionMove): - await computer.move(action.x, action.y) - elif isinstance(action, ActionScreenshot): - await computer.screenshot() - elif isinstance(action, ActionScroll): - await computer.scroll(action.x, action.y, action.scroll_x, action.scroll_y) - elif isinstance(action, ActionType): - await computer.type(action.text) - elif isinstance(action, ActionWait): - await computer.wait() - - return await computer.screenshot() diff --git a/src/agents/agent.py b/src/agents/agent.py deleted file mode 100644 index a24456b0..00000000 --- a/src/agents/agent.py +++ /dev/null @@ -1,245 +0,0 @@ -from __future__ import annotations - -import dataclasses -import inspect -from collections.abc import Awaitable -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, cast - -from typing_extensions import NotRequired, TypeAlias, TypedDict - -from .guardrail import InputGuardrail, OutputGuardrail -from .handoffs import Handoff -from .items import ItemHelpers -from .logger import logger -from .mcp import MCPUtil -from .model_settings import ModelSettings -from .models.interface import Model -from .run_context import RunContextWrapper, TContext -from .tool import FunctionToolResult, Tool, function_tool -from .util import _transforms -from .util._types import MaybeAwaitable - -if TYPE_CHECKING: - from .lifecycle import AgentHooks - from .mcp import MCPServer - from .result import RunResult - - -@dataclass -class ToolsToFinalOutputResult: - is_final_output: bool - """Whether this is the final output. If False, the LLM will run again and receive the tool call - output. - """ - - final_output: Any | None = None - """The final output. Can be None if `is_final_output` is False, otherwise must match the - `output_type` of the agent. - """ - - -ToolsToFinalOutputFunction: TypeAlias = Callable[ - [RunContextWrapper[TContext], list[FunctionToolResult]], - MaybeAwaitable[ToolsToFinalOutputResult], -] -"""A function that takes a run context and a list of tool results, and returns a -`ToolsToFinalOutputResult`. -""" - - -class StopAtTools(TypedDict): - stop_at_tool_names: list[str] - """A list of tool names, any of which will stop the agent from running further.""" - - -class MCPConfig(TypedDict): - """Configuration for MCP servers.""" - - convert_schemas_to_strict: NotRequired[bool] - """If True, we will attempt to convert the MCP schemas to strict-mode schemas. This is a - best-effort conversion, so some schemas may not be convertible. Defaults to False. 
- """ - - -@dataclass -class Agent(Generic[TContext]): - """An agent is an AI model configured with instructions, tools, guardrails, handoffs and more. - - We strongly recommend passing `instructions`, which is the "system prompt" for the agent. In - addition, you can pass `handoff_description`, which is a human-readable description of the - agent, used when the agent is used inside tools/handoffs. - - Agents are generic on the context type. The context is a (mutable) object you create. It is - passed to tool functions, handoffs, guardrails, etc. - """ - - name: str - """The name of the agent.""" - - instructions: ( - str - | Callable[ - [RunContextWrapper[TContext], Agent[TContext]], - MaybeAwaitable[str], - ] - | None - ) = None - """The instructions for the agent. Will be used as the "system prompt" when this agent is - invoked. Describes what the agent should do, and how it responds. - - Can either be a string, or a function that dynamically generates instructions for the agent. If - you provide a function, it will be called with the context and the agent instance. It must - return a string. - """ - - handoff_description: str | None = None - """A description of the agent. This is used when the agent is used as a handoff, so that an - LLM knows what it does and when to invoke it. - """ - - handoffs: list[Agent[Any] | Handoff[TContext]] = field(default_factory=list) - """Handoffs are sub-agents that the agent can delegate to. You can provide a list of handoffs, - and the agent can choose to delegate to them if relevant. Allows for separation of concerns and - modularity. - """ - - model: str | Model | None = None - """The model implementation to use when invoking the LLM. - - By default, if not set, the agent will use the default model configured in - `openai_provider.DEFAULT_MODEL` (currently "gpt-4o"). - """ - - model_settings: ModelSettings = field(default_factory=ModelSettings) - """Configures model-specific tuning parameters (e.g. temperature, top_p). - """ - - tools: list[Tool] = field(default_factory=list) - """A list of tools that the agent can use.""" - - mcp_servers: list[MCPServer] = field(default_factory=list) - """A list of [Model Context Protocol](https://modelcontextprotocol.io/) servers that - the agent can use. Every time the agent runs, it will include tools from these servers in the - list of available tools. - - NOTE: You are expected to manage the lifecycle of these servers. Specifically, you must call - `server.connect()` before passing it to the agent, and `server.cleanup()` when the server is no - longer needed. - """ - - mcp_config: MCPConfig = field(default_factory=lambda: MCPConfig()) - """Configuration for MCP servers.""" - - input_guardrails: list[InputGuardrail[TContext]] = field(default_factory=list) - """A list of checks that run in parallel to the agent's execution, before generating a - response. Runs only if the agent is the first agent in the chain. - """ - - output_guardrails: list[OutputGuardrail[TContext]] = field(default_factory=list) - """A list of checks that run on the final output of the agent, after generating a response. - Runs only if the agent produces a final output. - """ - - output_type: type[Any] | None = None - """The type of the output object. If not provided, the output will be `str`.""" - - hooks: AgentHooks[TContext] | None = None - """A class that receives callbacks on various lifecycle events for this agent. 
- """ - - tool_use_behavior: ( - Literal["run_llm_again", "stop_on_first_tool"] | StopAtTools | ToolsToFinalOutputFunction - ) = "run_llm_again" - """This lets you configure how tool use is handled. - - "run_llm_again": The default behavior. Tools are run, and then the LLM receives the results - and gets to respond. - - "stop_on_first_tool": The output of the first tool call is used as the final output. This - means that the LLM does not process the result of the tool call. - - A list of tool names: The agent will stop running if any of the tools in the list are called. - The final output will be the output of the first matching tool call. The LLM does not - process the result of the tool call. - - A function: If you pass a function, it will be called with the run context and the list of - tool results. It must return a `ToolToFinalOutputResult`, which determines whether the tool - calls result in a final output. - - NOTE: This configuration is specific to FunctionTools. Hosted tools, such as file search, - web search, etc are always processed by the LLM. - """ - - reset_tool_choice: bool = True - """Whether to reset the tool choice to the default value after a tool has been called. Defaults - to True. This ensures that the agent doesn't enter an infinite loop of tool usage.""" - - def clone(self, **kwargs: Any) -> Agent[TContext]: - """Make a copy of the agent, with the given arguments changed. For example, you could do: - ``` - new_agent = agent.clone(instructions="New instructions") - ``` - """ - return dataclasses.replace(self, **kwargs) - - def as_tool( - self, - tool_name: str | None, - tool_description: str | None, - custom_output_extractor: Callable[[RunResult], Awaitable[str]] | None = None, - ) -> Tool: - """Transform this agent into a tool, callable by other agents. - - This is different from handoffs in two ways: - 1. In handoffs, the new agent receives the conversation history. In this tool, the new agent - receives generated input. - 2. In handoffs, the new agent takes over the conversation. In this tool, the new agent is - called as a tool, and the conversation is continued by the original agent. - - Args: - tool_name: The name of the tool. If not provided, the agent's name will be used. - tool_description: The description of the tool, which should indicate what it does and - when to use it. - custom_output_extractor: A function that extracts the output from the agent. If not - provided, the last message from the agent will be used. 
- """ - - @function_tool( - name_override=tool_name or _transforms.transform_string_function_style(self.name), - description_override=tool_description or "", - ) - async def run_agent(context: RunContextWrapper, input: str) -> str: - from .run import Runner - - output = await Runner.run( - starting_agent=self, - input=input, - context=context.context, - ) - if custom_output_extractor: - return await custom_output_extractor(output) - - return ItemHelpers.text_message_outputs(output.new_items) - - return run_agent - - async def get_system_prompt(self, run_context: RunContextWrapper[TContext]) -> str | None: - """Get the system prompt for the agent.""" - if isinstance(self.instructions, str): - return self.instructions - elif callable(self.instructions): - if inspect.iscoroutinefunction(self.instructions): - return await cast(Awaitable[str], self.instructions(run_context, self)) - else: - return cast(str, self.instructions(run_context, self)) - elif self.instructions is not None: - logger.error(f"Instructions must be a string or a function, got {self.instructions}") - - return None - - async def get_mcp_tools(self) -> list[Tool]: - """Fetches the available tools from the MCP servers.""" - convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False) - return await MCPUtil.get_all_function_tools(self.mcp_servers, convert_schemas_to_strict) - - async def get_all_tools(self) -> list[Tool]: - """All agent tools, including MCP tools and function tools.""" - mcp_tools = await self.get_mcp_tools() - return mcp_tools + self.tools diff --git a/src/agents/computer.py b/src/agents/computer.py deleted file mode 100644 index 1b9224d5..00000000 --- a/src/agents/computer.py +++ /dev/null @@ -1,107 +0,0 @@ -import abc -from typing import Literal - -Environment = Literal["mac", "windows", "ubuntu", "browser"] -Button = Literal["left", "right", "wheel", "back", "forward"] - - -class Computer(abc.ABC): - """A computer implemented with sync operations. The Computer interface abstracts the - operations needed to control a computer or browser.""" - - @property - @abc.abstractmethod - def environment(self) -> Environment: - pass - - @property - @abc.abstractmethod - def dimensions(self) -> tuple[int, int]: - pass - - @abc.abstractmethod - def screenshot(self) -> str: - pass - - @abc.abstractmethod - def click(self, x: int, y: int, button: Button) -> None: - pass - - @abc.abstractmethod - def double_click(self, x: int, y: int) -> None: - pass - - @abc.abstractmethod - def scroll(self, x: int, y: int, scroll_x: int, scroll_y: int) -> None: - pass - - @abc.abstractmethod - def type(self, text: str) -> None: - pass - - @abc.abstractmethod - def wait(self) -> None: - pass - - @abc.abstractmethod - def move(self, x: int, y: int) -> None: - pass - - @abc.abstractmethod - def keypress(self, keys: list[str]) -> None: - pass - - @abc.abstractmethod - def drag(self, path: list[tuple[int, int]]) -> None: - pass - - -class AsyncComputer(abc.ABC): - """A computer implemented with async operations. 
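# A minimal sketch of the Agent API defined above: constructing an agent, cloning it
# with clone() (which uses dataclasses.replace), and exposing it to another agent via
# as_tool(). Agent names, instructions, and the weather tool are illustrative, and
# function_tool is assumed to be usable as a bare decorator.
from agents.agent import Agent
from agents.tool import function_tool


@function_tool
def get_weather(city: str) -> str:
    """Return a short weather report for a city."""
    return f"It is sunny in {city}."


assistant = Agent(
    name="Assistant",
    instructions="You are a helpful assistant.",
    tools=[get_weather],
)

# clone() only replaces the fields you pass; everything else is copied as-is.
spanish_assistant = assistant.clone(
    name="Spanish Assistant",
    instructions="Respond only in Spanish.",
)

# as_tool() wraps the agent in a function tool, so the orchestrator keeps control of
# the conversation instead of handing it off.
orchestrator = Agent(
    name="Orchestrator",
    instructions="Use the tools to translate the user's message.",
    tools=[
        spanish_assistant.as_tool(
            tool_name="translate_to_spanish",
            tool_description="Translate the given text to Spanish.",
        )
    ],
)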
The Computer interface abstracts the - operations needed to control a computer or browser.""" - - @property - @abc.abstractmethod - def environment(self) -> Environment: - pass - - @property - @abc.abstractmethod - def dimensions(self) -> tuple[int, int]: - pass - - @abc.abstractmethod - async def screenshot(self) -> str: - pass - - @abc.abstractmethod - async def click(self, x: int, y: int, button: Button) -> None: - pass - - @abc.abstractmethod - async def double_click(self, x: int, y: int) -> None: - pass - - @abc.abstractmethod - async def scroll(self, x: int, y: int, scroll_x: int, scroll_y: int) -> None: - pass - - @abc.abstractmethod - async def type(self, text: str) -> None: - pass - - @abc.abstractmethod - async def wait(self) -> None: - pass - - @abc.abstractmethod - async def move(self, x: int, y: int) -> None: - pass - - @abc.abstractmethod - async def keypress(self, keys: list[str]) -> None: - pass - - @abc.abstractmethod - async def drag(self, path: list[tuple[int, int]]) -> None: - pass diff --git a/src/agents/exceptions.py b/src/agents/exceptions.py deleted file mode 100644 index 78898f01..00000000 --- a/src/agents/exceptions.py +++ /dev/null @@ -1,63 +0,0 @@ -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from .guardrail import InputGuardrailResult, OutputGuardrailResult - - -class AgentsException(Exception): - """Base class for all exceptions in the Agents SDK.""" - - -class MaxTurnsExceeded(AgentsException): - """Exception raised when the maximum number of turns is exceeded.""" - - message: str - - def __init__(self, message: str): - self.message = message - - -class ModelBehaviorError(AgentsException): - """Exception raised when the model does something unexpected, e.g. calling a tool that doesn't - exist, or providing malformed JSON. - """ - - message: str - - def __init__(self, message: str): - self.message = message - - -class UserError(AgentsException): - """Exception raised when the user makes an error using the SDK.""" - - message: str - - def __init__(self, message: str): - self.message = message - - -class InputGuardrailTripwireTriggered(AgentsException): - """Exception raised when a guardrail tripwire is triggered.""" - - guardrail_result: "InputGuardrailResult" - """The result data of the guardrail that was triggered.""" - - def __init__(self, guardrail_result: "InputGuardrailResult"): - self.guardrail_result = guardrail_result - super().__init__( - f"Guardrail {guardrail_result.guardrail.__class__.__name__} triggered tripwire" - ) - - -class OutputGuardrailTripwireTriggered(AgentsException): - """Exception raised when a guardrail tripwire is triggered.""" - - guardrail_result: "OutputGuardrailResult" - """The result data of the guardrail that was triggered.""" - - def __init__(self, guardrail_result: "OutputGuardrailResult"): - self.guardrail_result = guardrail_result - super().__init__( - f"Guardrail {guardrail_result.guardrail.__class__.__name__} triggered tripwire" - ) diff --git a/src/agents/extensions/handoff_filters.py b/src/agents/extensions/handoff_filters.py deleted file mode 100644 index f4f9b8bf..00000000 --- a/src/agents/extensions/handoff_filters.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import annotations - -from ..handoffs import HandoffInputData -from ..items import ( - HandoffCallItem, - HandoffOutputItem, - RunItem, - ToolCallItem, - ToolCallOutputItem, - TResponseInputItem, -) - -"""Contains common handoff input filters, for convenience. 
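# A small sketch of handling the exception types defined above when running an agent.
# Runner.run(starting_agent=..., input=...) is taken from the as_tool() implementation
# earlier in this file; result.final_output is assumed from the run result API, and
# the agent and prompt are illustrative.
import asyncio

from agents.agent import Agent
from agents.exceptions import (
    InputGuardrailTripwireTriggered,
    MaxTurnsExceeded,
    ModelBehaviorError,
)
from agents.run import Runner


async def main() -> None:
    agent = Agent(name="Assistant", instructions="Answer briefly.")
    try:
        result = await Runner.run(starting_agent=agent, input="Hello!")
        print(result.final_output)
    except InputGuardrailTripwireTriggered as exc:
        print("Input guardrail tripped:", exc.guardrail_result.output.output_info)
    except MaxTurnsExceeded as exc:
        print("Run exceeded the turn limit:", exc.message)
    except ModelBehaviorError as exc:
        print("Model produced unexpected output:", exc.message)


asyncio.run(main())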
""" - - -def remove_all_tools(handoff_input_data: HandoffInputData) -> HandoffInputData: - """Filters out all tool items: file search, web search and function calls+output.""" - - history = handoff_input_data.input_history - new_items = handoff_input_data.new_items - - filtered_history = ( - _remove_tool_types_from_input(history) if isinstance(history, tuple) else history - ) - filtered_pre_handoff_items = _remove_tools_from_items(handoff_input_data.pre_handoff_items) - filtered_new_items = _remove_tools_from_items(new_items) - - return HandoffInputData( - input_history=filtered_history, - pre_handoff_items=filtered_pre_handoff_items, - new_items=filtered_new_items, - ) - - -def _remove_tools_from_items(items: tuple[RunItem, ...]) -> tuple[RunItem, ...]: - filtered_items = [] - for item in items: - if ( - isinstance(item, HandoffCallItem) - or isinstance(item, HandoffOutputItem) - or isinstance(item, ToolCallItem) - or isinstance(item, ToolCallOutputItem) - ): - continue - filtered_items.append(item) - return tuple(filtered_items) - - -def _remove_tool_types_from_input( - items: tuple[TResponseInputItem, ...], -) -> tuple[TResponseInputItem, ...]: - tool_types = [ - "function_call", - "function_call_output", - "computer_call", - "computer_call_output", - "file_search_call", - "web_search_call", - ] - - filtered_items: list[TResponseInputItem] = [] - for item in items: - itype = item.get("type") - if itype in tool_types: - continue - filtered_items.append(item) - return tuple(filtered_items) diff --git a/src/agents/extensions/handoff_prompt.py b/src/agents/extensions/handoff_prompt.py deleted file mode 100644 index cfb5ca7e..00000000 --- a/src/agents/extensions/handoff_prompt.py +++ /dev/null @@ -1,19 +0,0 @@ -# A recommended prompt prefix for agents that use handoffs. We recommend including this or -# similar instructions in any agents that use handoffs. -RECOMMENDED_PROMPT_PREFIX = ( - "# System context\n" - "You are part of a multi-agent system called the Agents SDK, designed to make agent " - "coordination and execution easy. Agents uses two primary abstraction: **Agents** and " - "**Handoffs**. An agent encompasses instructions and tools and can hand off a " - "conversation to another agent when appropriate. " - "Handoffs are achieved by calling a handoff function, generally named " - "`transfer_to_`. Transfers between agents are handled seamlessly in the background;" - " do not mention or draw attention to these transfers in your conversation with the user.\n" -) - - -def prompt_with_handoff_instructions(prompt: str) -> str: - """ - Add recommended instructions to the prompt for agents that use handoffs. - """ - return f"{RECOMMENDED_PROMPT_PREFIX}\n\n{prompt}" diff --git a/src/agents/extensions/visualization.py b/src/agents/extensions/visualization.py deleted file mode 100644 index 5fb35062..00000000 --- a/src/agents/extensions/visualization.py +++ /dev/null @@ -1,137 +0,0 @@ -from typing import Optional - -import graphviz # type: ignore - -from agents import Agent -from agents.handoffs import Handoff -from agents.tool import Tool - - -def get_main_graph(agent: Agent) -> str: - """ - Generates the main graph structure in DOT format for the given agent. - - Args: - agent (Agent): The agent for which the graph is to be generated. - - Returns: - str: The DOT format string representing the graph. 
- """ - parts = [ - """ - digraph G { - graph [splines=true]; - node [fontname="Arial"]; - edge [penwidth=1.5]; - """ - ] - parts.append(get_all_nodes(agent)) - parts.append(get_all_edges(agent)) - parts.append("}") - return "".join(parts) - - -def get_all_nodes(agent: Agent, parent: Optional[Agent] = None) -> str: - """ - Recursively generates the nodes for the given agent and its handoffs in DOT format. - - Args: - agent (Agent): The agent for which the nodes are to be generated. - - Returns: - str: The DOT format string representing the nodes. - """ - parts = [] - - # Start and end the graph - parts.append( - '"__start__" [label="__start__", shape=ellipse, style=filled, ' - "fillcolor=lightblue, width=0.5, height=0.3];" - '"__end__" [label="__end__", shape=ellipse, style=filled, ' - "fillcolor=lightblue, width=0.5, height=0.3];" - ) - # Ensure parent agent node is colored - if not parent: - parts.append( - f'"{agent.name}" [label="{agent.name}", shape=box, style=filled, ' - "fillcolor=lightyellow, width=1.5, height=0.8];" - ) - - for tool in agent.tools: - parts.append( - f'"{tool.name}" [label="{tool.name}", shape=ellipse, style=filled, ' - f"fillcolor=lightgreen, width=0.5, height=0.3];" - ) - - for handoff in agent.handoffs: - if isinstance(handoff, Handoff): - parts.append( - f'"{handoff.agent_name}" [label="{handoff.agent_name}", ' - f"shape=box, style=filled, style=rounded, " - f"fillcolor=lightyellow, width=1.5, height=0.8];" - ) - if isinstance(handoff, Agent): - parts.append( - f'"{handoff.name}" [label="{handoff.name}", ' - f"shape=box, style=filled, style=rounded, " - f"fillcolor=lightyellow, width=1.5, height=0.8];" - ) - parts.append(get_all_nodes(handoff)) - - return "".join(parts) - - -def get_all_edges(agent: Agent, parent: Optional[Agent] = None) -> str: - """ - Recursively generates the edges for the given agent and its handoffs in DOT format. - - Args: - agent (Agent): The agent for which the edges are to be generated. - parent (Agent, optional): The parent agent. Defaults to None. - - Returns: - str: The DOT format string representing the edges. - """ - parts = [] - - if not parent: - parts.append(f'"__start__" -> "{agent.name}";') - - for tool in agent.tools: - parts.append(f""" - "{agent.name}" -> "{tool.name}" [style=dotted, penwidth=1.5]; - "{tool.name}" -> "{agent.name}" [style=dotted, penwidth=1.5];""") - - for handoff in agent.handoffs: - if isinstance(handoff, Handoff): - parts.append(f""" - "{agent.name}" -> "{handoff.agent_name}";""") - if isinstance(handoff, Agent): - parts.append(f""" - "{agent.name}" -> "{handoff.name}";""") - parts.append(get_all_edges(handoff, agent)) - - if not agent.handoffs and not isinstance(agent, Tool): # type: ignore - parts.append(f'"{agent.name}" -> "__end__";') - - return "".join(parts) - - -def draw_graph(agent: Agent, filename: Optional[str] = None) -> graphviz.Source: - """ - Draws the graph for the given agent and optionally saves it as a PNG file. - - Args: - agent (Agent): The agent for which the graph is to be drawn. - filename (str): The name of the file to save the graph as a PNG. - - Returns: - graphviz.Source: The graphviz Source object representing the graph. 
- """ - dot_code = get_main_graph(agent) - graph = graphviz.Source(dot_code) - - if filename: - graph.render(filename, format="png") - - return graph diff --git a/src/agents/function_schema.py b/src/agents/function_schema.py deleted file mode 100644 index 681affce..00000000 --- a/src/agents/function_schema.py +++ /dev/null @@ -1,344 +0,0 @@ -from __future__ import annotations - -import contextlib -import inspect -import logging -import re -from dataclasses import dataclass -from typing import Any, Callable, Literal, get_args, get_origin, get_type_hints - -from griffe import Docstring, DocstringSectionKind -from pydantic import BaseModel, Field, create_model - -from .exceptions import UserError -from .run_context import RunContextWrapper -from .strict_schema import ensure_strict_json_schema - - -@dataclass -class FuncSchema: - """ - Captures the schema for a python function, in preparation for sending it to an LLM as a tool. - """ - - name: str - """The name of the function.""" - description: str | None - """The description of the function.""" - params_pydantic_model: type[BaseModel] - """A Pydantic model that represents the function's parameters.""" - params_json_schema: dict[str, Any] - """The JSON schema for the function's parameters, derived from the Pydantic model.""" - signature: inspect.Signature - """The signature of the function.""" - takes_context: bool = False - """Whether the function takes a RunContextWrapper argument (must be the first argument).""" - strict_json_schema: bool = True - """Whether the JSON schema is in strict mode. We **strongly** recommend setting this to True, - as it increases the likelihood of correct JSON input.""" - - def to_call_args(self, data: BaseModel) -> tuple[list[Any], dict[str, Any]]: - """ - Converts validated data from the Pydantic model into (args, kwargs), suitable for calling - the original function. - """ - positional_args: list[Any] = [] - keyword_args: dict[str, Any] = {} - seen_var_positional = False - - # Use enumerate() so we can skip the first parameter if it's context. - for idx, (name, param) in enumerate(self.signature.parameters.items()): - # If the function takes a RunContextWrapper and this is the first parameter, skip it. - if self.takes_context and idx == 0: - continue - - value = getattr(data, name, None) - if param.kind == param.VAR_POSITIONAL: - # e.g. *args: extend positional args and mark that *args is now seen - positional_args.extend(value or []) - seen_var_positional = True - elif param.kind == param.VAR_KEYWORD: - # e.g. **kwargs handling - keyword_args.update(value or {}) - elif param.kind in (param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD): - # Before *args, add to positional args. After *args, add to keyword args. - if not seen_var_positional: - positional_args.append(value) - else: - keyword_args[name] = value - else: - # For KEYWORD_ONLY parameters, always use keyword args. - keyword_args[name] = value - return positional_args, keyword_args - - -@dataclass -class FuncDocumentation: - """Contains metadata about a python function, extracted from its docstring.""" - - name: str - """The name of the function, via `__name__`.""" - description: str | None - """The description of the function, derived from the docstring.""" - param_descriptions: dict[str, str] | None - """The parameter descriptions of the function, derived from the docstring.""" - - -DocstringStyle = Literal["google", "numpy", "sphinx"] - - -# As of Feb 2025, the automatic style detection in griffe is an Insiders feature. 
This -# code approximates it. -def _detect_docstring_style(doc: str) -> DocstringStyle: - scores: dict[DocstringStyle, int] = {"sphinx": 0, "numpy": 0, "google": 0} - - # Sphinx style detection: look for :param, :type, :return:, and :rtype: - sphinx_patterns = [r"^:param\s", r"^:type\s", r"^:return:", r"^:rtype:"] - for pattern in sphinx_patterns: - if re.search(pattern, doc, re.MULTILINE): - scores["sphinx"] += 1 - - # Numpy style detection: look for headers like 'Parameters', 'Returns', or 'Yields' followed by - # a dashed underline - numpy_patterns = [ - r"^Parameters\s*\n\s*-{3,}", - r"^Returns\s*\n\s*-{3,}", - r"^Yields\s*\n\s*-{3,}", - ] - for pattern in numpy_patterns: - if re.search(pattern, doc, re.MULTILINE): - scores["numpy"] += 1 - - # Google style detection: look for section headers with a trailing colon - google_patterns = [r"^(Args|Arguments):", r"^(Returns):", r"^(Raises):"] - for pattern in google_patterns: - if re.search(pattern, doc, re.MULTILINE): - scores["google"] += 1 - - max_score = max(scores.values()) - if max_score == 0: - return "google" - - # Priority order: sphinx > numpy > google in case of tie - styles: list[DocstringStyle] = ["sphinx", "numpy", "google"] - - for style in styles: - if scores[style] == max_score: - return style - - return "google" - - -@contextlib.contextmanager -def _suppress_griffe_logging(): - # Supresses warnings about missing annotations for params - logger = logging.getLogger("griffe") - previous_level = logger.getEffectiveLevel() - logger.setLevel(logging.ERROR) - try: - yield - finally: - logger.setLevel(previous_level) - - -def generate_func_documentation( - func: Callable[..., Any], style: DocstringStyle | None = None -) -> FuncDocumentation: - """ - Extracts metadata from a function docstring, in preparation for sending it to an LLM as a tool. - - Args: - func: The function to extract documentation from. - style: The style of the docstring to use for parsing. If not provided, we will attempt to - auto-detect the style. - - Returns: - A FuncDocumentation object containing the function's name, description, and parameter - descriptions. - """ - name = func.__name__ - doc = inspect.getdoc(func) - if not doc: - return FuncDocumentation(name=name, description=None, param_descriptions=None) - - with _suppress_griffe_logging(): - docstring = Docstring(doc, lineno=1, parser=style or _detect_docstring_style(doc)) - parsed = docstring.parse() - - description: str | None = next( - (section.value for section in parsed if section.kind == DocstringSectionKind.text), None - ) - - param_descriptions: dict[str, str] = { - param.name: param.description - for section in parsed - if section.kind == DocstringSectionKind.parameters - for param in section.value - } - - return FuncDocumentation( - name=func.__name__, - description=description, - param_descriptions=param_descriptions or None, - ) - - -def function_schema( - func: Callable[..., Any], - docstring_style: DocstringStyle | None = None, - name_override: str | None = None, - description_override: str | None = None, - use_docstring_info: bool = True, - strict_json_schema: bool = True, -) -> FuncSchema: - """ - Given a python function, extracts a `FuncSchema` from it, capturing the name, description, - parameter descriptions, and other metadata. - - Args: - func: The function to extract the schema from. - docstring_style: The style of the docstring to use for parsing. If not provided, we will - attempt to auto-detect the style. 
- name_override: If provided, use this name instead of the function's `__name__`. - description_override: If provided, use this description instead of the one derived from the - docstring. - use_docstring_info: If True, uses the docstring to generate the description and parameter - descriptions. - strict_json_schema: Whether the JSON schema is in strict mode. If True, we'll ensure that - the schema adheres to the "strict" standard the OpenAI API expects. We **strongly** - recommend setting this to True, as it increases the likelihood of the LLM providing - correct JSON input. - - Returns: - A `FuncSchema` object containing the function's name, description, parameter descriptions, - and other metadata. - """ - - # 1. Grab docstring info - if use_docstring_info: - doc_info = generate_func_documentation(func, docstring_style) - param_descs = doc_info.param_descriptions or {} - else: - doc_info = None - param_descs = {} - - func_name = name_override or doc_info.name if doc_info else func.__name__ - - # 2. Inspect function signature and get type hints - sig = inspect.signature(func) - type_hints = get_type_hints(func) - params = list(sig.parameters.items()) - takes_context = False - filtered_params = [] - - if params: - first_name, first_param = params[0] - # Prefer the evaluated type hint if available - ann = type_hints.get(first_name, first_param.annotation) - if ann != inspect._empty: - origin = get_origin(ann) or ann - if origin is RunContextWrapper: - takes_context = True # Mark that the function takes context - else: - filtered_params.append((first_name, first_param)) - else: - filtered_params.append((first_name, first_param)) - - # For parameters other than the first, raise error if any use RunContextWrapper. - for name, param in params[1:]: - ann = type_hints.get(name, param.annotation) - if ann != inspect._empty: - origin = get_origin(ann) or ann - if origin is RunContextWrapper: - raise UserError( - f"RunContextWrapper param found at non-first position in function" - f" {func.__name__}" - ) - filtered_params.append((name, param)) - - # We will collect field definitions for create_model as a dict: - # field_name -> (type_annotation, default_value_or_Field(...)) - fields: dict[str, Any] = {} - - for name, param in filtered_params: - ann = type_hints.get(name, param.annotation) - default = param.default - - # If there's no type hint, assume `Any` - if ann == inspect._empty: - ann = Any - - # If a docstring param description exists, use it - field_description = param_descs.get(name, None) - - # Handle different parameter kinds - if param.kind == param.VAR_POSITIONAL: - # e.g. *args: extend positional args - if get_origin(ann) is tuple: - # e.g. def foo(*args: tuple[int, ...]) -> treat as List[int] - args_of_tuple = get_args(ann) - if len(args_of_tuple) == 2 and args_of_tuple[1] is Ellipsis: - ann = list[args_of_tuple[0]] # type: ignore - else: - ann = list[Any] - else: - # If user wrote *args: int, treat as List[int] - ann = list[ann] # type: ignore - - # Default factory to empty list - fields[name] = ( - ann, - Field(default_factory=list, description=field_description), # type: ignore - ) - - elif param.kind == param.VAR_KEYWORD: - # **kwargs handling - if get_origin(ann) is dict: - # e.g. def foo(**kwargs: dict[str, int]) - dict_args = get_args(ann) - if len(dict_args) == 2: - ann = dict[dict_args[0], dict_args[1]] # type: ignore - else: - ann = dict[str, Any] - else: - # e.g. 
def foo(**kwargs: int) -> Dict[str, int] - ann = dict[str, ann] # type: ignore - - fields[name] = ( - ann, - Field(default_factory=dict, description=field_description), # type: ignore - ) - - else: - # Normal parameter - if default == inspect._empty: - # Required field - fields[name] = ( - ann, - Field(..., description=field_description), - ) - else: - # Parameter with a default value - fields[name] = ( - ann, - Field(default=default, description=field_description), - ) - - # 3. Dynamically build a Pydantic model - dynamic_model = create_model(f"{func_name}_args", __base__=BaseModel, **fields) - - # 4. Build JSON schema from that model - json_schema = dynamic_model.model_json_schema() - if strict_json_schema: - json_schema = ensure_strict_json_schema(json_schema) - - # 5. Return as a FuncSchema dataclass - return FuncSchema( - name=func_name, - description=description_override or doc_info.description if doc_info else None, - params_pydantic_model=dynamic_model, - params_json_schema=json_schema, - signature=sig, - takes_context=takes_context, - strict_json_schema=strict_json_schema, - ) diff --git a/src/agents/guardrail.py b/src/agents/guardrail.py deleted file mode 100644 index a96f0f7d..00000000 --- a/src/agents/guardrail.py +++ /dev/null @@ -1,320 +0,0 @@ -from __future__ import annotations - -import inspect -from collections.abc import Awaitable -from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Callable, Generic, Union, overload - -from typing_extensions import TypeVar - -from .exceptions import UserError -from .items import TResponseInputItem -from .run_context import RunContextWrapper, TContext -from .util._types import MaybeAwaitable - -if TYPE_CHECKING: - from .agent import Agent - - -@dataclass -class GuardrailFunctionOutput: - """The output of a guardrail function.""" - - output_info: Any - """ - Optional information about the guardrail's output. For example, the guardrail could include - information about the checks it performed and granular results. - """ - - tripwire_triggered: bool - """ - Whether the tripwire was triggered. If triggered, the agent's execution will be halted. - """ - - -@dataclass -class InputGuardrailResult: - """The result of a guardrail run.""" - - guardrail: InputGuardrail[Any] - """ - The guardrail that was run. - """ - - output: GuardrailFunctionOutput - """The output of the guardrail function.""" - - -@dataclass -class OutputGuardrailResult: - """The result of a guardrail run.""" - - guardrail: OutputGuardrail[Any] - """ - The guardrail that was run. - """ - - agent_output: Any - """ - The output of the agent that was checked by the guardrail. - """ - - agent: Agent[Any] - """ - The agent that was checked by the guardrail. - """ - - output: GuardrailFunctionOutput - """The output of the guardrail function.""" - - -@dataclass -class InputGuardrail(Generic[TContext]): - """Input guardrails are checks that run in parallel to the agent's execution. - They can be used to do things like: - - Check if input messages are off-topic - - Take over control of the agent's execution if an unexpected input is detected - - You can use the `@input_guardrail()` decorator to turn a function into an `InputGuardrail`, or - create an `InputGuardrail` manually. - - Guardrails return a `GuardrailResult`. 
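# A minimal sketch of function_schema() as defined above: it parses the docstring,
# builds a Pydantic model for the parameters, and emits a strict JSON schema. The
# example function is illustrative.
from agents.function_schema import function_schema


def add(a: int, b: int = 0) -> int:
    """Add two integers.

    Args:
        a: The first number.
        b: The second number, defaulting to zero.
    """
    return a + b


schema = function_schema(add)
print(schema.name)                # "add"
print(schema.description)         # "Add two integers."
print(schema.params_json_schema)  # strict JSON schema for a and b

# to_call_args() turns validated model data back into (args, kwargs) for the function.
args, kwargs = schema.to_call_args(schema.params_pydantic_model(a=1, b=2))
print(add(*args, **kwargs))       # 3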
If `result.tripwire_triggered` is `True`, the agent - execution will immediately stop and a `InputGuardrailTripwireTriggered` exception will be raised - """ - - guardrail_function: Callable[ - [RunContextWrapper[TContext], Agent[Any], str | list[TResponseInputItem]], - MaybeAwaitable[GuardrailFunctionOutput], - ] - """A function that receives the agent input and the context, and returns a - `GuardrailResult`. The result marks whether the tripwire was triggered, and can optionally - include information about the guardrail's output. - """ - - name: str | None = None - """The name of the guardrail, used for tracing. If not provided, we'll use the guardrail - function's name. - """ - - def get_name(self) -> str: - if self.name: - return self.name - - return self.guardrail_function.__name__ - - async def run( - self, - agent: Agent[Any], - input: str | list[TResponseInputItem], - context: RunContextWrapper[TContext], - ) -> InputGuardrailResult: - if not callable(self.guardrail_function): - raise UserError(f"Guardrail function must be callable, got {self.guardrail_function}") - - output = self.guardrail_function(context, agent, input) - if inspect.isawaitable(output): - return InputGuardrailResult( - guardrail=self, - output=await output, - ) - - return InputGuardrailResult( - guardrail=self, - output=output, - ) - - -@dataclass -class OutputGuardrail(Generic[TContext]): - """Output guardrails are checks that run on the final output of an agent. - They can be used to do check if the output passes certain validation criteria - - You can use the `@output_guardrail()` decorator to turn a function into an `OutputGuardrail`, - or create an `OutputGuardrail` manually. - - Guardrails return a `GuardrailResult`. If `result.tripwire_triggered` is `True`, a - `OutputGuardrailTripwireTriggered` exception will be raised. - """ - - guardrail_function: Callable[ - [RunContextWrapper[TContext], Agent[Any], Any], - MaybeAwaitable[GuardrailFunctionOutput], - ] - """A function that receives the final agent, its output, and the context, and returns a - `GuardrailResult`. The result marks whether the tripwire was triggered, and can optionally - include information about the guardrail's output. - """ - - name: str | None = None - """The name of the guardrail, used for tracing. If not provided, we'll use the guardrail - function's name. 
- """ - - def get_name(self) -> str: - if self.name: - return self.name - - return self.guardrail_function.__name__ - - async def run( - self, context: RunContextWrapper[TContext], agent: Agent[Any], agent_output: Any - ) -> OutputGuardrailResult: - if not callable(self.guardrail_function): - raise UserError(f"Guardrail function must be callable, got {self.guardrail_function}") - - output = self.guardrail_function(context, agent, agent_output) - if inspect.isawaitable(output): - return OutputGuardrailResult( - guardrail=self, - agent=agent, - agent_output=agent_output, - output=await output, - ) - - return OutputGuardrailResult( - guardrail=self, - agent=agent, - agent_output=agent_output, - output=output, - ) - - -TContext_co = TypeVar("TContext_co", bound=Any, covariant=True) - -# For InputGuardrail -_InputGuardrailFuncSync = Callable[ - [RunContextWrapper[TContext_co], "Agent[Any]", Union[str, list[TResponseInputItem]]], - GuardrailFunctionOutput, -] -_InputGuardrailFuncAsync = Callable[ - [RunContextWrapper[TContext_co], "Agent[Any]", Union[str, list[TResponseInputItem]]], - Awaitable[GuardrailFunctionOutput], -] - - -@overload -def input_guardrail( - func: _InputGuardrailFuncSync[TContext_co], -) -> InputGuardrail[TContext_co]: ... - - -@overload -def input_guardrail( - func: _InputGuardrailFuncAsync[TContext_co], -) -> InputGuardrail[TContext_co]: ... - - -@overload -def input_guardrail( - *, - name: str | None = None, -) -> Callable[ - [_InputGuardrailFuncSync[TContext_co] | _InputGuardrailFuncAsync[TContext_co]], - InputGuardrail[TContext_co], -]: ... - - -def input_guardrail( - func: _InputGuardrailFuncSync[TContext_co] - | _InputGuardrailFuncAsync[TContext_co] - | None = None, - *, - name: str | None = None, -) -> ( - InputGuardrail[TContext_co] - | Callable[ - [_InputGuardrailFuncSync[TContext_co] | _InputGuardrailFuncAsync[TContext_co]], - InputGuardrail[TContext_co], - ] -): - """ - Decorator that transforms a sync or async function into an `InputGuardrail`. - It can be used directly (no parentheses) or with keyword args, e.g.: - - @input_guardrail - def my_sync_guardrail(...): ... - - @input_guardrail(name="guardrail_name") - async def my_async_guardrail(...): ... - """ - - def decorator( - f: _InputGuardrailFuncSync[TContext_co] | _InputGuardrailFuncAsync[TContext_co], - ) -> InputGuardrail[TContext_co]: - return InputGuardrail(guardrail_function=f, name=name) - - if func is not None: - # Decorator was used without parentheses - return decorator(func) - - # Decorator used with keyword arguments - return decorator - - -_OutputGuardrailFuncSync = Callable[ - [RunContextWrapper[TContext_co], "Agent[Any]", Any], - GuardrailFunctionOutput, -] -_OutputGuardrailFuncAsync = Callable[ - [RunContextWrapper[TContext_co], "Agent[Any]", Any], - Awaitable[GuardrailFunctionOutput], -] - - -@overload -def output_guardrail( - func: _OutputGuardrailFuncSync[TContext_co], -) -> OutputGuardrail[TContext_co]: ... - - -@overload -def output_guardrail( - func: _OutputGuardrailFuncAsync[TContext_co], -) -> OutputGuardrail[TContext_co]: ... - - -@overload -def output_guardrail( - *, - name: str | None = None, -) -> Callable[ - [_OutputGuardrailFuncSync[TContext_co] | _OutputGuardrailFuncAsync[TContext_co]], - OutputGuardrail[TContext_co], -]: ... 
- - -def output_guardrail( - func: _OutputGuardrailFuncSync[TContext_co] - | _OutputGuardrailFuncAsync[TContext_co] - | None = None, - *, - name: str | None = None, -) -> ( - OutputGuardrail[TContext_co] - | Callable[ - [_OutputGuardrailFuncSync[TContext_co] | _OutputGuardrailFuncAsync[TContext_co]], - OutputGuardrail[TContext_co], - ] -): - """ - Decorator that transforms a sync or async function into an `OutputGuardrail`. - It can be used directly (no parentheses) or with keyword args, e.g.: - - @output_guardrail - def my_sync_guardrail(...): ... - - @output_guardrail(name="guardrail_name") - async def my_async_guardrail(...): ... - """ - - def decorator( - f: _OutputGuardrailFuncSync[TContext_co] | _OutputGuardrailFuncAsync[TContext_co], - ) -> OutputGuardrail[TContext_co]: - return OutputGuardrail(guardrail_function=f, name=name) - - if func is not None: - # Decorator was used without parentheses - return decorator(func) - - # Decorator used with keyword arguments - return decorator diff --git a/src/agents/handoffs.py b/src/agents/handoffs.py deleted file mode 100644 index 686191f3..00000000 --- a/src/agents/handoffs.py +++ /dev/null @@ -1,236 +0,0 @@ -from __future__ import annotations - -import inspect -from collections.abc import Awaitable -from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Callable, Generic, cast, overload - -from pydantic import TypeAdapter -from typing_extensions import TypeAlias, TypeVar - -from .exceptions import ModelBehaviorError, UserError -from .items import RunItem, TResponseInputItem -from .run_context import RunContextWrapper, TContext -from .strict_schema import ensure_strict_json_schema -from .tracing.spans import SpanError -from .util import _error_tracing, _json, _transforms - -if TYPE_CHECKING: - from .agent import Agent - - -# The handoff input type is the type of data passed when the agent is called via a handoff. -THandoffInput = TypeVar("THandoffInput", default=Any) - -OnHandoffWithInput = Callable[[RunContextWrapper[Any], THandoffInput], Any] -OnHandoffWithoutInput = Callable[[RunContextWrapper[Any]], Any] - - -@dataclass(frozen=True) -class HandoffInputData: - input_history: str | tuple[TResponseInputItem, ...] - """ - The input history before `Runner.run()` was called. - """ - - pre_handoff_items: tuple[RunItem, ...] - """ - The items generated before the agent turn where the handoff was invoked. - """ - - new_items: tuple[RunItem, ...] - """ - The new items generated during the current agent turn, including the item that triggered the - handoff and the tool output message representing the response from the handoff output. - """ - - -HandoffInputFilter: TypeAlias = Callable[[HandoffInputData], HandoffInputData] -"""A function that filters the input data passed to the next agent.""" - - -@dataclass -class Handoff(Generic[TContext]): - """A handoff is when an agent delegates a task to another agent. - For example, in a customer support scenario you might have a "triage agent" that determines - which agent should handle the user's request, and sub-agents that specialize in different - areas like billing, account management, etc. - """ - - tool_name: str - """The name of the tool that represents the handoff.""" - - tool_description: str - """The description of the tool that represents the handoff.""" - - input_json_schema: dict[str, Any] - """The JSON schema for the handoff input. Can be empty if the handoff does not take an input. 
- """ - - on_invoke_handoff: Callable[[RunContextWrapper[Any], str], Awaitable[Agent[TContext]]] - """The function that invokes the handoff. The parameters passed are: - 1. The handoff run context - 2. The arguments from the LLM, as a JSON string. Empty string if input_json_schema is empty. - - Must return an agent. - """ - - agent_name: str - """The name of the agent that is being handed off to.""" - - input_filter: HandoffInputFilter | None = None - """A function that filters the inputs that are passed to the next agent. By default, the new - agent sees the entire conversation history. In some cases, you may want to filter inputs e.g. - to remove older inputs, or remove tools from existing inputs. - - The function will receive the entire conversation history so far, including the input item - that triggered the handoff and a tool call output item representing the handoff tool's output. - - You are free to modify the input history or new items as you see fit. The next agent that - runs will receive `handoff_input_data.all_items`. - - IMPORTANT: in streaming mode, we will not stream anything as a result of this function. The - items generated before will already have been streamed. - """ - - strict_json_schema: bool = True - """Whether the input JSON schema is in strict mode. We **strongly** recommend setting this to - True, as it increases the likelihood of correct JSON input. - """ - - def get_transfer_message(self, agent: Agent[Any]) -> str: - base = f"{{'assistant': '{agent.name}'}}" - return base - - @classmethod - def default_tool_name(cls, agent: Agent[Any]) -> str: - return _transforms.transform_string_function_style(f"transfer_to_{agent.name}") - - @classmethod - def default_tool_description(cls, agent: Agent[Any]) -> str: - return ( - f"Handoff to the {agent.name} agent to handle the request. " - f"{agent.handoff_description or ''}" - ) - - -@overload -def handoff( - agent: Agent[TContext], - *, - tool_name_override: str | None = None, - tool_description_override: str | None = None, - input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None, -) -> Handoff[TContext]: ... - - -@overload -def handoff( - agent: Agent[TContext], - *, - on_handoff: OnHandoffWithInput[THandoffInput], - input_type: type[THandoffInput], - tool_description_override: str | None = None, - tool_name_override: str | None = None, - input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None, -) -> Handoff[TContext]: ... - - -@overload -def handoff( - agent: Agent[TContext], - *, - on_handoff: OnHandoffWithoutInput, - tool_description_override: str | None = None, - tool_name_override: str | None = None, - input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None, -) -> Handoff[TContext]: ... - - -def handoff( - agent: Agent[TContext], - tool_name_override: str | None = None, - tool_description_override: str | None = None, - on_handoff: OnHandoffWithInput[THandoffInput] | OnHandoffWithoutInput | None = None, - input_type: type[THandoffInput] | None = None, - input_filter: Callable[[HandoffInputData], HandoffInputData] | None = None, -) -> Handoff[TContext]: - """Create a handoff from an agent. - - Args: - agent: The agent to handoff to, or a function that returns an agent. - tool_name_override: Optional override for the name of the tool that represents the handoff. - tool_description_override: Optional override for the description of the tool that - represents the handoff. - on_handoff: A function that runs when the handoff is invoked. 
- input_type: the type of the input to the handoff. If provided, the input will be validated - against this type. Only relevant if you pass a function that takes an input. - input_filter: a function that filters the inputs that are passed to the next agent. - """ - assert (on_handoff and input_type) or not (on_handoff and input_type), ( - "You must provide either both on_input and input_type, or neither" - ) - type_adapter: TypeAdapter[Any] | None - if input_type is not None: - assert callable(on_handoff), "on_handoff must be callable" - sig = inspect.signature(on_handoff) - if len(sig.parameters) != 2: - raise UserError("on_handoff must take two arguments: context and input") - - type_adapter = TypeAdapter(input_type) - input_json_schema = type_adapter.json_schema() - else: - type_adapter = None - input_json_schema = {} - if on_handoff is not None: - sig = inspect.signature(on_handoff) - if len(sig.parameters) != 1: - raise UserError("on_handoff must take one argument: context") - - async def _invoke_handoff( - ctx: RunContextWrapper[Any], input_json: str | None = None - ) -> Agent[Any]: - if input_type is not None and type_adapter is not None: - if input_json is None: - _error_tracing.attach_error_to_current_span( - SpanError( - message="Handoff function expected non-null input, but got None", - data={"details": "input_json is None"}, - ) - ) - raise ModelBehaviorError("Handoff function expected non-null input, but got None") - - validated_input = _json.validate_json( - json_str=input_json, - type_adapter=type_adapter, - partial=False, - ) - input_func = cast(OnHandoffWithInput[THandoffInput], on_handoff) - if inspect.iscoroutinefunction(input_func): - await input_func(ctx, validated_input) - else: - input_func(ctx, validated_input) - elif on_handoff is not None: - no_input_func = cast(OnHandoffWithoutInput, on_handoff) - if inspect.iscoroutinefunction(no_input_func): - await no_input_func(ctx) - else: - no_input_func(ctx) - - return agent - - tool_name = tool_name_override or Handoff.default_tool_name(agent) - tool_description = tool_description_override or Handoff.default_tool_description(agent) - - # Always ensure the input JSON schema is in strict mode - # If there is a need, we can make this configurable in the future - input_json_schema = ensure_strict_json_schema(input_json_schema) - - return Handoff( - tool_name=tool_name, - tool_description=tool_description, - input_json_schema=input_json_schema, - on_invoke_handoff=_invoke_handoff, - input_filter=input_filter, - agent_name=agent.name, - ) diff --git a/src/agents/items.py b/src/agents/items.py deleted file mode 100644 index c2af0dfc..00000000 --- a/src/agents/items.py +++ /dev/null @@ -1,248 +0,0 @@ -from __future__ import annotations - -import abc -import copy -from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar, Union - -from openai.types.responses import ( - Response, - ResponseComputerToolCall, - ResponseFileSearchToolCall, - ResponseFunctionToolCall, - ResponseFunctionWebSearch, - ResponseInputItemParam, - ResponseOutputItem, - ResponseOutputMessage, - ResponseOutputRefusal, - ResponseOutputText, - ResponseStreamEvent, -) -from openai.types.responses.response_input_item_param import ComputerCallOutput, FunctionCallOutput -from openai.types.responses.response_reasoning_item import ResponseReasoningItem -from pydantic import BaseModel -from typing_extensions import TypeAlias - -from .exceptions import AgentsException, ModelBehaviorError -from .usage import Usage - -if 
TYPE_CHECKING: - from .agent import Agent - -TResponse = Response -"""A type alias for the Response type from the OpenAI SDK.""" - -TResponseInputItem = ResponseInputItemParam -"""A type alias for the ResponseInputItemParam type from the OpenAI SDK.""" - -TResponseOutputItem = ResponseOutputItem -"""A type alias for the ResponseOutputItem type from the OpenAI SDK.""" - -TResponseStreamEvent = ResponseStreamEvent -"""A type alias for the ResponseStreamEvent type from the OpenAI SDK.""" - -T = TypeVar("T", bound=Union[TResponseOutputItem, TResponseInputItem]) - - -@dataclass -class RunItemBase(Generic[T], abc.ABC): - agent: Agent[Any] - """The agent whose run caused this item to be generated.""" - - raw_item: T - """The raw Responses item from the run. This will always be a either an output item (i.e. - `openai.types.responses.ResponseOutputItem` or an input item - (i.e. `openai.types.responses.ResponseInputItemParam`). - """ - - def to_input_item(self) -> TResponseInputItem: - """Converts this item into an input item suitable for passing to the model.""" - if isinstance(self.raw_item, dict): - # We know that input items are dicts, so we can ignore the type error - return self.raw_item # type: ignore - elif isinstance(self.raw_item, BaseModel): - # All output items are Pydantic models that can be converted to input items. - return self.raw_item.model_dump(exclude_unset=True) # type: ignore - else: - raise AgentsException(f"Unexpected raw item type: {type(self.raw_item)}") - - -@dataclass -class MessageOutputItem(RunItemBase[ResponseOutputMessage]): - """Represents a message from the LLM.""" - - raw_item: ResponseOutputMessage - """The raw response output message.""" - - type: Literal["message_output_item"] = "message_output_item" - - -@dataclass -class HandoffCallItem(RunItemBase[ResponseFunctionToolCall]): - """Represents a tool call for a handoff from one agent to another.""" - - raw_item: ResponseFunctionToolCall - """The raw response function tool call that represents the handoff.""" - - type: Literal["handoff_call_item"] = "handoff_call_item" - - -@dataclass -class HandoffOutputItem(RunItemBase[TResponseInputItem]): - """Represents the output of a handoff.""" - - raw_item: TResponseInputItem - """The raw input item that represents the handoff taking place.""" - - source_agent: Agent[Any] - """The agent that made the handoff.""" - - target_agent: Agent[Any] - """The agent that is being handed off to.""" - - type: Literal["handoff_output_item"] = "handoff_output_item" - - -ToolCallItemTypes: TypeAlias = Union[ - ResponseFunctionToolCall, - ResponseComputerToolCall, - ResponseFileSearchToolCall, - ResponseFunctionWebSearch, -] -"""A type that represents a tool call item.""" - - -@dataclass -class ToolCallItem(RunItemBase[ToolCallItemTypes]): - """Represents a tool call e.g. a function call or computer action call.""" - - raw_item: ToolCallItemTypes - """The raw tool call item.""" - - type: Literal["tool_call_item"] = "tool_call_item" - - -@dataclass -class ToolCallOutputItem(RunItemBase[Union[FunctionCallOutput, ComputerCallOutput]]): - """Represents the output of a tool call.""" - - raw_item: FunctionCallOutput | ComputerCallOutput - """The raw item from the model.""" - - output: Any - """The output of the tool call. This is whatever the tool call returned; the `raw_item` - contains a string representation of the output. 
- """ - - type: Literal["tool_call_output_item"] = "tool_call_output_item" - - -@dataclass -class ReasoningItem(RunItemBase[ResponseReasoningItem]): - """Represents a reasoning item.""" - - raw_item: ResponseReasoningItem - """The raw reasoning item.""" - - type: Literal["reasoning_item"] = "reasoning_item" - - -RunItem: TypeAlias = Union[ - MessageOutputItem, - HandoffCallItem, - HandoffOutputItem, - ToolCallItem, - ToolCallOutputItem, - ReasoningItem, -] -"""An item generated by an agent.""" - - -@dataclass -class ModelResponse: - output: list[TResponseOutputItem] - """A list of outputs (messages, tool calls, etc) generated by the model""" - - usage: Usage - """The usage information for the response.""" - - referenceable_id: str | None - """An ID for the response which can be used to refer to the response in subsequent calls to the - model. Not supported by all model providers. - """ - - def to_input_items(self) -> list[TResponseInputItem]: - """Convert the output into a list of input items suitable for passing to the model.""" - # We happen to know that the shape of the Pydantic output items are the same as the - # equivalent TypedDict input items, so we can just convert each one. - # This is also tested via unit tests. - return [it.model_dump(exclude_unset=True) for it in self.output] # type: ignore - - -class ItemHelpers: - @classmethod - def extract_last_content(cls, message: TResponseOutputItem) -> str: - """Extracts the last text content or refusal from a message.""" - if not isinstance(message, ResponseOutputMessage): - return "" - - last_content = message.content[-1] - if isinstance(last_content, ResponseOutputText): - return last_content.text - elif isinstance(last_content, ResponseOutputRefusal): - return last_content.refusal - else: - raise ModelBehaviorError(f"Unexpected content type: {type(last_content)}") - - @classmethod - def extract_last_text(cls, message: TResponseOutputItem) -> str | None: - """Extracts the last text content from a message, if any. 
Ignores refusals.""" - if isinstance(message, ResponseOutputMessage): - last_content = message.content[-1] - if isinstance(last_content, ResponseOutputText): - return last_content.text - - return None - - @classmethod - def input_to_new_input_list( - cls, input: str | list[TResponseInputItem] - ) -> list[TResponseInputItem]: - """Converts a string or list of input items into a list of input items.""" - if isinstance(input, str): - return [ - { - "content": input, - "role": "user", - } - ] - return copy.deepcopy(input) - - @classmethod - def text_message_outputs(cls, items: list[RunItem]) -> str: - """Concatenates all the text content from a list of message output items.""" - text = "" - for item in items: - if isinstance(item, MessageOutputItem): - text += cls.text_message_output(item) - return text - - @classmethod - def text_message_output(cls, message: MessageOutputItem) -> str: - """Extracts all the text content from a single message output item.""" - text = "" - for item in message.raw_item.content: - if isinstance(item, ResponseOutputText): - text += item.text - return text - - @classmethod - def tool_call_output_item( - cls, tool_call: ResponseFunctionToolCall, output: str - ) -> FunctionCallOutput: - """Creates a tool call output item from a tool call and its output.""" - return { - "call_id": tool_call.call_id, - "output": output, - "type": "function_call_output", - } diff --git a/src/agents/lifecycle.py b/src/agents/lifecycle.py deleted file mode 100644 index 8643248b..00000000 --- a/src/agents/lifecycle.py +++ /dev/null @@ -1,105 +0,0 @@ -from typing import Any, Generic - -from .agent import Agent -from .run_context import RunContextWrapper, TContext -from .tool import Tool - - -class RunHooks(Generic[TContext]): - """A class that receives callbacks on various lifecycle events in an agent run. Subclass and - override the methods you need. - """ - - async def on_agent_start( - self, context: RunContextWrapper[TContext], agent: Agent[TContext] - ) -> None: - """Called before the agent is invoked. Called each time the current agent changes.""" - pass - - async def on_agent_end( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - output: Any, - ) -> None: - """Called when the agent produces a final output.""" - pass - - async def on_handoff( - self, - context: RunContextWrapper[TContext], - from_agent: Agent[TContext], - to_agent: Agent[TContext], - ) -> None: - """Called when a handoff occurs.""" - pass - - async def on_tool_start( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - tool: Tool, - ) -> None: - """Called before a tool is invoked.""" - pass - - async def on_tool_end( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - tool: Tool, - result: str, - ) -> None: - """Called after a tool is invoked.""" - pass - - -class AgentHooks(Generic[TContext]): - """A class that receives callbacks on various lifecycle events for a specific agent. You can - set this on `agent.hooks` to receive events for that specific agent. - - Subclass and override the methods you need. - """ - - async def on_start(self, context: RunContextWrapper[TContext], agent: Agent[TContext]) -> None: - """Called before the agent is invoked. 
Called each time the running agent is changed to this - agent.""" - pass - - async def on_end( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - output: Any, - ) -> None: - """Called when the agent produces a final output.""" - pass - - async def on_handoff( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - source: Agent[TContext], - ) -> None: - """Called when the agent is being handed off to. The `source` is the agent that is handing - off to this agent.""" - pass - - async def on_tool_start( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - tool: Tool, - ) -> None: - """Called before a tool is invoked.""" - pass - - async def on_tool_end( - self, - context: RunContextWrapper[TContext], - agent: Agent[TContext], - tool: Tool, - result: str, - ) -> None: - """Called after a tool is invoked.""" - pass diff --git a/src/agents/logger.py b/src/agents/logger.py deleted file mode 100644 index bd81a827..00000000 --- a/src/agents/logger.py +++ /dev/null @@ -1,3 +0,0 @@ -import logging - -logger = logging.getLogger("openai.agents") diff --git a/src/agents/mcp/__init__.py b/src/agents/mcp/__init__.py deleted file mode 100644 index 1a72a89f..00000000 --- a/src/agents/mcp/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -try: - from .server import ( - MCPServer, - MCPServerSse, - MCPServerSseParams, - MCPServerStdio, - MCPServerStdioParams, - ) -except ImportError: - pass - -from .util import MCPUtil - -__all__ = [ - "MCPServer", - "MCPServerSse", - "MCPServerSseParams", - "MCPServerStdio", - "MCPServerStdioParams", - "MCPUtil", -] diff --git a/src/agents/mcp/server.py b/src/agents/mcp/server.py deleted file mode 100644 index e70d7ce6..00000000 --- a/src/agents/mcp/server.py +++ /dev/null @@ -1,301 +0,0 @@ -from __future__ import annotations - -import abc -import asyncio -from contextlib import AbstractAsyncContextManager, AsyncExitStack -from pathlib import Path -from typing import Any, Literal - -from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream -from mcp import ClientSession, StdioServerParameters, Tool as MCPTool, stdio_client -from mcp.client.sse import sse_client -from mcp.types import CallToolResult, JSONRPCMessage -from typing_extensions import NotRequired, TypedDict - -from ..exceptions import UserError -from ..logger import logger - - -class MCPServer(abc.ABC): - """Base class for Model Context Protocol servers.""" - - @abc.abstractmethod - async def connect(self): - """Connect to the server. For example, this might mean spawning a subprocess or - opening a network connection. The server is expected to remain connected until - `cleanup()` is called. - """ - pass - - @property - @abc.abstractmethod - def name(self) -> str: - """A readable name for the server.""" - pass - - @abc.abstractmethod - async def cleanup(self): - """Cleanup the server. For example, this might mean closing a subprocess or - closing a network connection. - """ - pass - - @abc.abstractmethod - async def list_tools(self) -> list[MCPTool]: - """List the tools available on the server.""" - pass - - @abc.abstractmethod - async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None) -> CallToolResult: - """Invoke a tool on the server.""" - pass - - -class _MCPServerWithClientSession(MCPServer, abc.ABC): - """Base class for MCP servers that use a `ClientSession` to communicate with the server.""" - - def __init__(self, cache_tools_list: bool): - """ - Args: - cache_tools_list: Whether to cache the tools list. 
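# A small sketch of the per-agent lifecycle hooks described above; the logging is
# illustrative and simply prints each event.
from typing import Any

from agents.agent import Agent
from agents.lifecycle import AgentHooks
from agents.run_context import RunContextWrapper


class PrintingHooks(AgentHooks):
    async def on_start(self, context: RunContextWrapper, agent: Agent) -> None:
        print(f"{agent.name} is starting")

    async def on_end(self, context: RunContextWrapper, agent: Agent, output: Any) -> None:
        print(f"{agent.name} finished with: {output!r}")


observed_agent = Agent(
    name="Assistant",
    instructions="Answer briefly.",
    hooks=PrintingHooks(),
)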
If `True`, the tools list will be - cached and only fetched from the server once. If `False`, the tools list will be - fetched from the server on each call to `list_tools()`. The cache can be invalidated - by calling `invalidate_tools_cache()`. You should set this to `True` if you know the - server will not change its tools list, because it can drastically improve latency - (by avoiding a round-trip to the server every time). - """ - self.session: ClientSession | None = None - self.exit_stack: AsyncExitStack = AsyncExitStack() - self._cleanup_lock: asyncio.Lock = asyncio.Lock() - self.cache_tools_list = cache_tools_list - - # The cache is always dirty at startup, so that we fetch tools at least once - self._cache_dirty = True - self._tools_list: list[MCPTool] | None = None - - @abc.abstractmethod - def create_streams( - self, - ) -> AbstractAsyncContextManager[ - tuple[ - MemoryObjectReceiveStream[JSONRPCMessage | Exception], - MemoryObjectSendStream[JSONRPCMessage], - ] - ]: - """Create the streams for the server.""" - pass - - async def __aenter__(self): - await self.connect() - return self - - async def __aexit__(self, exc_type, exc_value, traceback): - await self.cleanup() - - def invalidate_tools_cache(self): - """Invalidate the tools cache.""" - self._cache_dirty = True - - async def connect(self): - """Connect to the server.""" - try: - transport = await self.exit_stack.enter_async_context(self.create_streams()) - read, write = transport - session = await self.exit_stack.enter_async_context(ClientSession(read, write)) - await session.initialize() - self.session = session - except Exception as e: - logger.error(f"Error initializing MCP server: {e}") - await self.cleanup() - raise - - async def list_tools(self) -> list[MCPTool]: - """List the tools available on the server.""" - if not self.session: - raise UserError("Server not initialized. Make sure you call `connect()` first.") - - # Return from cache if caching is enabled, we have tools, and the cache is not dirty - if self.cache_tools_list and not self._cache_dirty and self._tools_list: - return self._tools_list - - # Reset the cache dirty to False - self._cache_dirty = False - - # Fetch the tools from the server - self._tools_list = (await self.session.list_tools()).tools - return self._tools_list - - async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None) -> CallToolResult: - """Invoke a tool on the server.""" - if not self.session: - raise UserError("Server not initialized. Make sure you call `connect()` first.") - - return await self.session.call_tool(tool_name, arguments) - - async def cleanup(self): - """Cleanup the server.""" - async with self._cleanup_lock: - try: - await self.exit_stack.aclose() - self.session = None - except Exception as e: - logger.error(f"Error cleaning up server: {e}") - - -class MCPServerStdioParams(TypedDict): - """Mirrors `mcp.client.stdio.StdioServerParameters`, but lets you pass params without another - import. - """ - - command: str - """The executable to run to start the server. For example, `python` or `node`.""" - - args: NotRequired[list[str]] - """Command line args to pass to the `command` executable. For example, `['foo.py']` or - `['server.js', '--port', '8080']`.""" - - env: NotRequired[dict[str, str]] - """The environment variables to set for the server. .""" - - cwd: NotRequired[str | Path] - """The working directory to use when spawning the process.""" - - encoding: NotRequired[str] - """The text encoding used when sending/receiving messages to the server. 
Defaults to `utf-8`.""" - - encoding_error_handler: NotRequired[Literal["strict", "ignore", "replace"]] - """The text encoding error handler. Defaults to `strict`. - - See https://docs.python.org/3/library/codecs.html#codec-base-classes for - explanations of possible values. - """ - - -class MCPServerStdio(_MCPServerWithClientSession): - """MCP server implementation that uses the stdio transport. See the [spec] - (https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#stdio) for - details. - """ - - def __init__( - self, - params: MCPServerStdioParams, - cache_tools_list: bool = False, - name: str | None = None, - ): - """Create a new MCP server based on the stdio transport. - - Args: - params: The params that configure the server. This includes the command to run to - start the server, the args to pass to the command, the environment variables to - set for the server, the working directory to use when spawning the process, and - the text encoding used when sending/receiving messages to the server. - cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be - cached and only fetched from the server once. If `False`, the tools list will be - fetched from the server on each call to `list_tools()`. The cache can be - invalidated by calling `invalidate_tools_cache()`. You should set this to `True` - if you know the server will not change its tools list, because it can drastically - improve latency (by avoiding a round-trip to the server every time). - name: A readable name for the server. If not provided, we'll create one from the - command. - """ - super().__init__(cache_tools_list) - - self.params = StdioServerParameters( - command=params["command"], - args=params.get("args", []), - env=params.get("env"), - cwd=params.get("cwd"), - encoding=params.get("encoding", "utf-8"), - encoding_error_handler=params.get("encoding_error_handler", "strict"), - ) - - self._name = name or f"stdio: {self.params.command}" - - def create_streams( - self, - ) -> AbstractAsyncContextManager[ - tuple[ - MemoryObjectReceiveStream[JSONRPCMessage | Exception], - MemoryObjectSendStream[JSONRPCMessage], - ] - ]: - """Create the streams for the server.""" - return stdio_client(self.params) - - @property - def name(self) -> str: - """A readable name for the server.""" - return self._name - - -class MCPServerSseParams(TypedDict): - """Mirrors the params in`mcp.client.sse.sse_client`.""" - - url: str - """The URL of the server.""" - - headers: NotRequired[dict[str, str]] - """The headers to send to the server.""" - - timeout: NotRequired[float] - """The timeout for the HTTP request. Defaults to 5 seconds.""" - - sse_read_timeout: NotRequired[float] - """The timeout for the SSE connection, in seconds. Defaults to 5 minutes.""" - - -class MCPServerSse(_MCPServerWithClientSession): - """MCP server implementation that uses the HTTP with SSE transport. See the [spec] - (https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#http-with-sse) - for details. - """ - - def __init__( - self, - params: MCPServerSseParams, - cache_tools_list: bool = False, - name: str | None = None, - ): - """Create a new MCP server based on the HTTP with SSE transport. - - Args: - params: The params that configure the server. This includes the URL of the server, - the headers to send to the server, the timeout for the HTTP request, and the - timeout for the SSE connection. - - cache_tools_list: Whether to cache the tools list. 
If `True`, the tools list will be - cached and only fetched from the server once. If `False`, the tools list will be - fetched from the server on each call to `list_tools()`. The cache can be - invalidated by calling `invalidate_tools_cache()`. You should set this to `True` - if you know the server will not change its tools list, because it can drastically - improve latency (by avoiding a round-trip to the server every time). - - name: A readable name for the server. If not provided, we'll create one from the - URL. - """ - super().__init__(cache_tools_list) - - self.params = params - self._name = name or f"sse: {self.params['url']}" - - def create_streams( - self, - ) -> AbstractAsyncContextManager[ - tuple[ - MemoryObjectReceiveStream[JSONRPCMessage | Exception], - MemoryObjectSendStream[JSONRPCMessage], - ] - ]: - """Create the streams for the server.""" - return sse_client( - url=self.params["url"], - headers=self.params.get("headers", None), - timeout=self.params.get("timeout", 5), - sse_read_timeout=self.params.get("sse_read_timeout", 60 * 5), - ) - - @property - def name(self) -> str: - """A readable name for the server.""" - return self._name diff --git a/src/agents/mcp/util.py b/src/agents/mcp/util.py deleted file mode 100644 index bbfe1885..00000000 --- a/src/agents/mcp/util.py +++ /dev/null @@ -1,136 +0,0 @@ -import functools -import json -from typing import TYPE_CHECKING, Any - -from agents.strict_schema import ensure_strict_json_schema - -from .. import _debug -from ..exceptions import AgentsException, ModelBehaviorError, UserError -from ..logger import logger -from ..run_context import RunContextWrapper -from ..tool import FunctionTool, Tool -from ..tracing import FunctionSpanData, get_current_span, mcp_tools_span - -if TYPE_CHECKING: - from mcp.types import Tool as MCPTool - - from .server import MCPServer - - -class MCPUtil: - """Set of utilities for interop between MCP and Agents SDK tools.""" - - @classmethod - async def get_all_function_tools( - cls, servers: list["MCPServer"], convert_schemas_to_strict: bool - ) -> list[Tool]: - """Get all function tools from a list of MCP servers.""" - tools = [] - tool_names: set[str] = set() - for server in servers: - server_tools = await cls.get_function_tools(server, convert_schemas_to_strict) - server_tool_names = {tool.name for tool in server_tools} - if len(server_tool_names & tool_names) > 0: - raise UserError( - f"Duplicate tool names found across MCP servers: " - f"{server_tool_names & tool_names}" - ) - tool_names.update(server_tool_names) - tools.extend(server_tools) - - return tools - - @classmethod - async def get_function_tools( - cls, server: "MCPServer", convert_schemas_to_strict: bool - ) -> list[Tool]: - """Get all function tools from a single MCP server.""" - - with mcp_tools_span(server=server.name) as span: - tools = await server.list_tools() - span.span_data.result = [tool.name for tool in tools] - - return [cls.to_function_tool(tool, server, convert_schemas_to_strict) for tool in tools] - - @classmethod - def to_function_tool( - cls, tool: "MCPTool", server: "MCPServer", convert_schemas_to_strict: bool - ) -> FunctionTool: - """Convert an MCP tool to an Agents SDK function tool.""" - invoke_func = functools.partial(cls.invoke_mcp_tool, server, tool) - schema, is_strict = tool.inputSchema, False - - # MCP spec doesn't require the inputSchema to have `properties`, but OpenAI spec does. 
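As a usage sketch of the tools-list caching documented in the removed server code: the SDK names here (MCPServerStdio, list_tools, invalidate_tools_cache) come from the code above, while the local server.py script is a hypothetical stand-in for a real MCP server command.

import asyncio

from agents.mcp import MCPServerStdio


async def main() -> None:
    # With cache_tools_list=True, list_tools() hits the server once and then
    # serves the cached list until invalidate_tools_cache() is called.
    async with MCPServerStdio(
        params={"command": "python", "args": ["server.py"]},  # hypothetical server script
        cache_tools_list=True,
        name="example stdio server",
    ) as server:
        first = await server.list_tools()    # fetched from the server
        second = await server.list_tools()   # served from the cache
        server.invalidate_tools_cache()
        refreshed = await server.list_tools()  # fetched from the server again
        print([tool.name for tool in refreshed])


asyncio.run(main())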
- if "properties" not in schema: - schema["properties"] = {} - - if convert_schemas_to_strict: - try: - schema = ensure_strict_json_schema(schema) - is_strict = True - except Exception as e: - logger.info(f"Error converting MCP schema to strict mode: {e}") - - return FunctionTool( - name=tool.name, - description=tool.description or "", - params_json_schema=schema, - on_invoke_tool=invoke_func, - strict_json_schema=is_strict, - ) - - @classmethod - async def invoke_mcp_tool( - cls, server: "MCPServer", tool: "MCPTool", context: RunContextWrapper[Any], input_json: str - ) -> str: - """Invoke an MCP tool and return the result as a string.""" - try: - json_data: dict[str, Any] = json.loads(input_json) if input_json else {} - except Exception as e: - if _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"Invalid JSON input for tool {tool.name}") - else: - logger.debug(f"Invalid JSON input for tool {tool.name}: {input_json}") - raise ModelBehaviorError( - f"Invalid JSON input for tool {tool.name}: {input_json}" - ) from e - - if _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"Invoking MCP tool {tool.name}") - else: - logger.debug(f"Invoking MCP tool {tool.name} with input {input_json}") - - try: - result = await server.call_tool(tool.name, json_data) - except Exception as e: - logger.error(f"Error invoking MCP tool {tool.name}: {e}") - raise AgentsException(f"Error invoking MCP tool {tool.name}: {e}") from e - - if _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"MCP tool {tool.name} completed.") - else: - logger.debug(f"MCP tool {tool.name} returned {result}") - - # The MCP tool result is a list of content items, whereas OpenAI tool outputs are a single - # string. We'll try to convert. - if len(result.content) == 1: - tool_output = result.content[0].model_dump_json() - elif len(result.content) > 1: - tool_output = json.dumps([item.model_dump() for item in result.content]) - else: - logger.error(f"Errored MCP tool result: {result}") - tool_output = "Error running tool." - - current_span = get_current_span() - if current_span: - if isinstance(current_span.span_data, FunctionSpanData): - current_span.span_data.output = tool_output - current_span.span_data.mcp_data = { - "server": server.name, - } - else: - logger.warning( - f"Current span is not a FunctionSpanData, skipping tool output: {current_span}" - ) - - return tool_output diff --git a/src/agents/model_settings.py b/src/agents/model_settings.py deleted file mode 100644 index f29cfa4a..00000000 --- a/src/agents/model_settings.py +++ /dev/null @@ -1,72 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass, fields, replace -from typing import Literal - -from openai.types.shared import Reasoning - - -@dataclass -class ModelSettings: - """Settings to use when calling an LLM. - - This class holds optional model configuration parameters (e.g. temperature, - top_p, penalties, truncation, etc.). - - Not all models/providers support all of these parameters, so please check the API documentation - for the specific model and provider you are using. 
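The conversion path above (get_all_function_tools to to_function_tool to invoke_mcp_tool) can also be exercised directly. A sketch, assuming server is an already-connected MCPServer such as the stdio example earlier:

from agents.mcp import MCPServer, MCPUtil


async def show_tools(server: MCPServer) -> None:
    # convert_schemas_to_strict=True attempts ensure_strict_json_schema() on each
    # tool's inputSchema and falls back to the original schema if conversion fails.
    tools = await MCPUtil.get_all_function_tools([server], convert_schemas_to_strict=True)
    for tool in tools:
        print(tool.name, "strict:", tool.strict_json_schema)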
- """ - - temperature: float | None = None - """The temperature to use when calling the model.""" - - top_p: float | None = None - """The top_p to use when calling the model.""" - - frequency_penalty: float | None = None - """The frequency penalty to use when calling the model.""" - - presence_penalty: float | None = None - """The presence penalty to use when calling the model.""" - - tool_choice: Literal["auto", "required", "none"] | str | None = None - """The tool choice to use when calling the model.""" - - parallel_tool_calls: bool | None = None - """Whether to use parallel tool calls when calling the model. - Defaults to False if not provided.""" - - truncation: Literal["auto", "disabled"] | None = None - """The truncation strategy to use when calling the model.""" - - max_tokens: int | None = None - """The maximum number of output tokens to generate.""" - - reasoning: Reasoning | None = None - """Configuration options for - [reasoning models](https://platform.openai.com/docs/guides/reasoning). - """ - - metadata: dict[str, str] | None = None - """Metadata to include with the model response call.""" - - store: bool | None = None - """Whether to store the generated model response for later retrieval. - Defaults to True if not provided.""" - - include_usage: bool | None = None - """Whether to include usage chunk. - Defaults to True if not provided.""" - - def resolve(self, override: ModelSettings | None) -> ModelSettings: - """Produce a new ModelSettings by overlaying any non-None values from the - override on top of this instance.""" - if override is None: - return self - - changes = { - field.name: getattr(override, field.name) - for field in fields(self) - if getattr(override, field.name) is not None - } - return replace(self, **changes) diff --git a/src/agents/models/_openai_shared.py b/src/agents/models/_openai_shared.py deleted file mode 100644 index 2e145018..00000000 --- a/src/agents/models/_openai_shared.py +++ /dev/null @@ -1,34 +0,0 @@ -from __future__ import annotations - -from openai import AsyncOpenAI - -_default_openai_key: str | None = None -_default_openai_client: AsyncOpenAI | None = None -_use_responses_by_default: bool = True - - -def set_default_openai_key(key: str) -> None: - global _default_openai_key - _default_openai_key = key - - -def get_default_openai_key() -> str | None: - return _default_openai_key - - -def set_default_openai_client(client: AsyncOpenAI) -> None: - global _default_openai_client - _default_openai_client = client - - -def get_default_openai_client() -> AsyncOpenAI | None: - return _default_openai_client - - -def set_use_responses_by_default(use_responses: bool) -> None: - global _use_responses_by_default - _use_responses_by_default = use_responses - - -def get_use_responses_by_default() -> bool: - return _use_responses_by_default diff --git a/src/agents/models/fake_id.py b/src/agents/models/fake_id.py deleted file mode 100644 index 0565b0a7..00000000 --- a/src/agents/models/fake_id.py +++ /dev/null @@ -1,5 +0,0 @@ -FAKE_RESPONSES_ID = "__fake_id__" -"""This is a placeholder ID used to fill in the `id` field in Responses API related objects. It's -useful when you're creating Responses objects from non-Responses APIs, e.g. the OpenAI Chat -Completions API or other LLM providers. 
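A small sketch of the resolve() overlay defined in the removed ModelSettings; the field values here are arbitrary:

from agents.model_settings import ModelSettings

base = ModelSettings(temperature=0.3, max_tokens=1024)
override = ModelSettings(temperature=0.0)  # only non-None fields take effect

effective = base.resolve(override)
assert effective.temperature == 0.0   # overridden
assert effective.max_tokens == 1024   # inherited from the base settings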
-""" diff --git a/src/agents/models/interface.py b/src/agents/models/interface.py deleted file mode 100644 index e9a8700c..00000000 --- a/src/agents/models/interface.py +++ /dev/null @@ -1,107 +0,0 @@ -from __future__ import annotations - -import abc -import enum -from collections.abc import AsyncIterator -from typing import TYPE_CHECKING - -from ..agent_output import AgentOutputSchema -from ..handoffs import Handoff -from ..items import ModelResponse, TResponseInputItem, TResponseStreamEvent -from ..tool import Tool - -if TYPE_CHECKING: - from ..model_settings import ModelSettings - - -class ModelTracing(enum.Enum): - DISABLED = 0 - """Tracing is disabled entirely.""" - - ENABLED = 1 - """Tracing is enabled, and all data is included.""" - - ENABLED_WITHOUT_DATA = 2 - """Tracing is enabled, but inputs/outputs are not included.""" - - def is_disabled(self) -> bool: - return self == ModelTracing.DISABLED - - def include_data(self) -> bool: - return self == ModelTracing.ENABLED - - -class Model(abc.ABC): - """The base interface for calling an LLM.""" - - @abc.abstractmethod - async def get_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - tracing: ModelTracing, - ) -> ModelResponse: - """Get a response from the model. - - Args: - system_instructions: The system instructions to use. - input: The input items to the model, in OpenAI Responses format. - model_settings: The model settings to use. - tools: The tools available to the model. - output_schema: The output schema to use. - handoffs: The handoffs available to the model. - tracing: Tracing configuration. - - Returns: - The full model response. - """ - pass - - @abc.abstractmethod - def stream_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - tracing: ModelTracing, - ) -> AsyncIterator[TResponseStreamEvent]: - """Stream a response from the model. - - Args: - system_instructions: The system instructions to use. - input: The input items to the model, in OpenAI Responses format. - model_settings: The model settings to use. - tools: The tools available to the model. - output_schema: The output schema to use. - handoffs: The handoffs available to the model. - tracing: Tracing configuration. - - Returns: - An iterator of response stream events, in OpenAI Responses format. - """ - pass - - -class ModelProvider(abc.ABC): - """The base interface for a model provider. - - Model provider is responsible for looking up Models by name. - """ - - @abc.abstractmethod - def get_model(self, model_name: str | None) -> Model: - """Get a model by name. - - Args: - model_name: The name of the model to get. - - Returns: - The model. 
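The three tracing modes in the removed interface differ only in what they allow to be recorded; a quick check of the two predicates:

from agents.models.interface import ModelTracing

assert ModelTracing.DISABLED.is_disabled()
assert ModelTracing.ENABLED.include_data()
assert not ModelTracing.ENABLED_WITHOUT_DATA.include_data()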
- """ diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py deleted file mode 100644 index 807c6512..00000000 --- a/src/agents/models/openai_chatcompletions.py +++ /dev/null @@ -1,1014 +0,0 @@ -from __future__ import annotations - -import dataclasses -import json -import time -from collections.abc import AsyncIterator, Iterable -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Literal, cast, overload - -from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream, NotGiven -from openai.types import ChatModel -from openai.types.chat import ( - ChatCompletion, - ChatCompletionAssistantMessageParam, - ChatCompletionChunk, - ChatCompletionContentPartImageParam, - ChatCompletionContentPartParam, - ChatCompletionContentPartTextParam, - ChatCompletionDeveloperMessageParam, - ChatCompletionMessage, - ChatCompletionMessageParam, - ChatCompletionMessageToolCallParam, - ChatCompletionSystemMessageParam, - ChatCompletionToolChoiceOptionParam, - ChatCompletionToolMessageParam, - ChatCompletionUserMessageParam, -) -from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam -from openai.types.chat.completion_create_params import ResponseFormat -from openai.types.completion_usage import CompletionUsage -from openai.types.responses import ( - EasyInputMessageParam, - Response, - ResponseCompletedEvent, - ResponseContentPartAddedEvent, - ResponseContentPartDoneEvent, - ResponseCreatedEvent, - ResponseFileSearchToolCallParam, - ResponseFunctionCallArgumentsDeltaEvent, - ResponseFunctionToolCall, - ResponseFunctionToolCallParam, - ResponseInputContentParam, - ResponseInputImageParam, - ResponseInputTextParam, - ResponseOutputItem, - ResponseOutputItemAddedEvent, - ResponseOutputItemDoneEvent, - ResponseOutputMessage, - ResponseOutputMessageParam, - ResponseOutputRefusal, - ResponseOutputText, - ResponseRefusalDeltaEvent, - ResponseTextDeltaEvent, - ResponseUsage, -) -from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message -from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails - -from .. 
import _debug -from ..agent_output import AgentOutputSchema -from ..exceptions import AgentsException, UserError -from ..handoffs import Handoff -from ..items import ModelResponse, TResponseInputItem, TResponseOutputItem, TResponseStreamEvent -from ..logger import logger -from ..tool import FunctionTool, Tool -from ..tracing import generation_span -from ..tracing.span_data import GenerationSpanData -from ..tracing.spans import Span -from ..usage import Usage -from ..version import __version__ -from .fake_id import FAKE_RESPONSES_ID -from .interface import Model, ModelTracing - -if TYPE_CHECKING: - from ..model_settings import ModelSettings - - -_USER_AGENT = f"Agents/Python {__version__}" -_HEADERS = {"User-Agent": _USER_AGENT} - - -@dataclass -class _StreamingState: - started: bool = False - text_content_index_and_output: tuple[int, ResponseOutputText] | None = None - refusal_content_index_and_output: tuple[int, ResponseOutputRefusal] | None = None - function_calls: dict[int, ResponseFunctionToolCall] = field(default_factory=dict) - - -class OpenAIChatCompletionsModel(Model): - def __init__( - self, - model: str | ChatModel, - openai_client: AsyncOpenAI, - ) -> None: - self.model = model - self._client = openai_client - - def _non_null_or_not_given(self, value: Any) -> Any: - return value if value is not None else NOT_GIVEN - - async def get_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - tracing: ModelTracing, - ) -> ModelResponse: - with generation_span( - model=str(self.model), - model_config=dataclasses.asdict(model_settings) - | {"base_url": str(self._client.base_url)}, - disabled=tracing.is_disabled(), - ) as span_generation: - response = await self._fetch_response( - system_instructions, - input, - model_settings, - tools, - output_schema, - handoffs, - span_generation, - tracing, - stream=False, - ) - - if _debug.DONT_LOG_MODEL_DATA: - logger.debug("Received model response") - else: - logger.debug( - f"LLM resp:\n{json.dumps(response.choices[0].message.model_dump(), indent=2)}\n" - ) - - usage = ( - Usage( - requests=1, - input_tokens=response.usage.prompt_tokens, - output_tokens=response.usage.completion_tokens, - total_tokens=response.usage.total_tokens, - ) - if response.usage - else Usage() - ) - if tracing.include_data(): - span_generation.span_data.output = [response.choices[0].message.model_dump()] - span_generation.span_data.usage = { - "input_tokens": usage.input_tokens, - "output_tokens": usage.output_tokens, - } - - items = _Converter.message_to_output_items(response.choices[0].message) - - return ModelResponse( - output=items, - usage=usage, - referenceable_id=None, - ) - - async def stream_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - tracing: ModelTracing, - ) -> AsyncIterator[TResponseStreamEvent]: - """ - Yields a partial message as it is generated, as well as the usage information. 
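Calling the removed Chat Completions model directly looks roughly like this. A sketch: it assumes OPENAI_API_KEY is set in the environment and uses the seven-argument get_response signature shown above; the prompt text is arbitrary.

import asyncio

from openai import AsyncOpenAI

from agents.model_settings import ModelSettings
from agents.models.interface import ModelTracing
from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel


async def main() -> None:
    model = OpenAIChatCompletionsModel(model="gpt-4o", openai_client=AsyncOpenAI())
    response = await model.get_response(
        system_instructions="Answer in one short sentence.",
        input="What is the capital of France?",
        model_settings=ModelSettings(temperature=0),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
    )
    # ModelResponse carries the converted output items plus token usage.
    print(response.usage.total_tokens)


asyncio.run(main())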
- """ - with generation_span( - model=str(self.model), - model_config=dataclasses.asdict(model_settings) - | {"base_url": str(self._client.base_url)}, - disabled=tracing.is_disabled(), - ) as span_generation: - response, stream = await self._fetch_response( - system_instructions, - input, - model_settings, - tools, - output_schema, - handoffs, - span_generation, - tracing, - stream=True, - ) - - usage: CompletionUsage | None = None - state = _StreamingState() - - async for chunk in stream: - if not state.started: - state.started = True - yield ResponseCreatedEvent( - response=response, - type="response.created", - ) - - # The usage is only available in the last chunk - usage = chunk.usage - - if not chunk.choices or not chunk.choices[0].delta: - continue - - delta = chunk.choices[0].delta - - # Handle text - if delta.content: - if not state.text_content_index_and_output: - # Initialize a content tracker for streaming text - state.text_content_index_and_output = ( - 0 if not state.refusal_content_index_and_output else 1, - ResponseOutputText( - text="", - type="output_text", - annotations=[], - ), - ) - # Start a new assistant message stream - assistant_item = ResponseOutputMessage( - id=FAKE_RESPONSES_ID, - content=[], - role="assistant", - type="message", - status="in_progress", - ) - # Notify consumers of the start of a new output message + first content part - yield ResponseOutputItemAddedEvent( - item=assistant_item, - output_index=0, - type="response.output_item.added", - ) - yield ResponseContentPartAddedEvent( - content_index=state.text_content_index_and_output[0], - item_id=FAKE_RESPONSES_ID, - output_index=0, - part=ResponseOutputText( - text="", - type="output_text", - annotations=[], - ), - type="response.content_part.added", - ) - # Emit the delta for this segment of content - yield ResponseTextDeltaEvent( - content_index=state.text_content_index_and_output[0], - delta=delta.content, - item_id=FAKE_RESPONSES_ID, - output_index=0, - type="response.output_text.delta", - ) - # Accumulate the text into the response part - state.text_content_index_and_output[1].text += delta.content - - # Handle refusals (model declines to answer) - if delta.refusal: - if not state.refusal_content_index_and_output: - # Initialize a content tracker for streaming refusal text - state.refusal_content_index_and_output = ( - 0 if not state.text_content_index_and_output else 1, - ResponseOutputRefusal(refusal="", type="refusal"), - ) - # Start a new assistant message if one doesn't exist yet (in-progress) - assistant_item = ResponseOutputMessage( - id=FAKE_RESPONSES_ID, - content=[], - role="assistant", - type="message", - status="in_progress", - ) - # Notify downstream that assistant message + first content part are starting - yield ResponseOutputItemAddedEvent( - item=assistant_item, - output_index=0, - type="response.output_item.added", - ) - yield ResponseContentPartAddedEvent( - content_index=state.refusal_content_index_and_output[0], - item_id=FAKE_RESPONSES_ID, - output_index=0, - part=ResponseOutputText( - text="", - type="output_text", - annotations=[], - ), - type="response.content_part.added", - ) - # Emit the delta for this segment of refusal - yield ResponseRefusalDeltaEvent( - content_index=state.refusal_content_index_and_output[0], - delta=delta.refusal, - item_id=FAKE_RESPONSES_ID, - output_index=0, - type="response.refusal.delta", - ) - # Accumulate the refusal string in the output part - state.refusal_content_index_and_output[1].refusal += delta.refusal - - # Handle tool calls - # 
Because we don't know the name of the function until the end of the stream, we'll - # save everything and yield events at the end - if delta.tool_calls: - for tc_delta in delta.tool_calls: - if tc_delta.index not in state.function_calls: - state.function_calls[tc_delta.index] = ResponseFunctionToolCall( - id=FAKE_RESPONSES_ID, - arguments="", - name="", - type="function_call", - call_id="", - ) - tc_function = tc_delta.function - - state.function_calls[tc_delta.index].arguments += ( - tc_function.arguments if tc_function else "" - ) or "" - state.function_calls[tc_delta.index].name += ( - tc_function.name if tc_function else "" - ) or "" - state.function_calls[tc_delta.index].call_id += tc_delta.id or "" - - function_call_starting_index = 0 - if state.text_content_index_and_output: - function_call_starting_index += 1 - # Send end event for this content part - yield ResponseContentPartDoneEvent( - content_index=state.text_content_index_and_output[0], - item_id=FAKE_RESPONSES_ID, - output_index=0, - part=state.text_content_index_and_output[1], - type="response.content_part.done", - ) - - if state.refusal_content_index_and_output: - function_call_starting_index += 1 - # Send end event for this content part - yield ResponseContentPartDoneEvent( - content_index=state.refusal_content_index_and_output[0], - item_id=FAKE_RESPONSES_ID, - output_index=0, - part=state.refusal_content_index_and_output[1], - type="response.content_part.done", - ) - - # Actually send events for the function calls - for function_call in state.function_calls.values(): - # First, a ResponseOutputItemAdded for the function call - yield ResponseOutputItemAddedEvent( - item=ResponseFunctionToolCall( - id=FAKE_RESPONSES_ID, - call_id=function_call.call_id, - arguments=function_call.arguments, - name=function_call.name, - type="function_call", - ), - output_index=function_call_starting_index, - type="response.output_item.added", - ) - # Then, yield the args - yield ResponseFunctionCallArgumentsDeltaEvent( - delta=function_call.arguments, - item_id=FAKE_RESPONSES_ID, - output_index=function_call_starting_index, - type="response.function_call_arguments.delta", - ) - # Finally, the ResponseOutputItemDone - yield ResponseOutputItemDoneEvent( - item=ResponseFunctionToolCall( - id=FAKE_RESPONSES_ID, - call_id=function_call.call_id, - arguments=function_call.arguments, - name=function_call.name, - type="function_call", - ), - output_index=function_call_starting_index, - type="response.output_item.done", - ) - - # Finally, send the Response completed event - outputs: list[ResponseOutputItem] = [] - if state.text_content_index_and_output or state.refusal_content_index_and_output: - assistant_msg = ResponseOutputMessage( - id=FAKE_RESPONSES_ID, - content=[], - role="assistant", - type="message", - status="completed", - ) - if state.text_content_index_and_output: - assistant_msg.content.append(state.text_content_index_and_output[1]) - if state.refusal_content_index_and_output: - assistant_msg.content.append(state.refusal_content_index_and_output[1]) - outputs.append(assistant_msg) - - # send a ResponseOutputItemDone for the assistant message - yield ResponseOutputItemDoneEvent( - item=assistant_msg, - output_index=0, - type="response.output_item.done", - ) - - for function_call in state.function_calls.values(): - outputs.append(function_call) - - final_response = response.model_copy() - final_response.output = outputs - final_response.usage = ( - ResponseUsage( - input_tokens=usage.prompt_tokens, - output_tokens=usage.completion_tokens, - 
total_tokens=usage.total_tokens, - output_tokens_details=OutputTokensDetails( - reasoning_tokens=usage.completion_tokens_details.reasoning_tokens - if usage.completion_tokens_details - and usage.completion_tokens_details.reasoning_tokens - else 0 - ), - input_tokens_details=InputTokensDetails( - cached_tokens=usage.prompt_tokens_details.cached_tokens - if usage.prompt_tokens_details and usage.prompt_tokens_details.cached_tokens - else 0 - ), - ) - if usage - else None - ) - - yield ResponseCompletedEvent( - response=final_response, - type="response.completed", - ) - if tracing.include_data(): - span_generation.span_data.output = [final_response.model_dump()] - - if usage: - span_generation.span_data.usage = { - "input_tokens": usage.prompt_tokens, - "output_tokens": usage.completion_tokens, - } - - @overload - async def _fetch_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - span: Span[GenerationSpanData], - tracing: ModelTracing, - stream: Literal[True], - ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ... - - @overload - async def _fetch_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - span: Span[GenerationSpanData], - tracing: ModelTracing, - stream: Literal[False], - ) -> ChatCompletion: ... - - async def _fetch_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - span: Span[GenerationSpanData], - tracing: ModelTracing, - stream: bool = False, - ) -> ChatCompletion | tuple[Response, AsyncStream[ChatCompletionChunk]]: - converted_messages = _Converter.items_to_messages(input) - - if system_instructions: - converted_messages.insert( - 0, - { - "content": system_instructions, - "role": "system", - }, - ) - if tracing.include_data(): - span.span_data.input = converted_messages - - parallel_tool_calls = ( - True if model_settings.parallel_tool_calls and tools and len(tools) > 0 else NOT_GIVEN - ) - tool_choice = _Converter.convert_tool_choice(model_settings.tool_choice) - response_format = _Converter.convert_response_format(output_schema) - - converted_tools = [ToolConverter.to_openai(tool) for tool in tools] if tools else [] - - for handoff in handoffs: - converted_tools.append(ToolConverter.convert_handoff_tool(handoff)) - - if _debug.DONT_LOG_MODEL_DATA: - logger.debug("Calling LLM") - else: - logger.debug( - f"{json.dumps(converted_messages, indent=2)}\n" - f"Tools:\n{json.dumps(converted_tools, indent=2)}\n" - f"Stream: {stream}\n" - f"Tool choice: {tool_choice}\n" - f"Response format: {response_format}\n" - ) - - reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None - store = _Converter.get_store_param(self._get_client(), model_settings) - - stream_options = _Converter.get_stream_options_param(self._get_client(), model_settings) - - ret = await self._get_client().chat.completions.create( - model=self.model, - messages=converted_messages, - tools=converted_tools or NOT_GIVEN, - temperature=self._non_null_or_not_given(model_settings.temperature), - top_p=self._non_null_or_not_given(model_settings.top_p), - 
frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty), - presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty), - max_tokens=self._non_null_or_not_given(model_settings.max_tokens), - tool_choice=tool_choice, - response_format=response_format, - parallel_tool_calls=parallel_tool_calls, - stream=stream, - stream_options=self._non_null_or_not_given(stream_options), - store=self._non_null_or_not_given(store), - reasoning_effort=self._non_null_or_not_given(reasoning_effort), - extra_headers=_HEADERS, - metadata=self._non_null_or_not_given(model_settings.metadata), - ) - - if isinstance(ret, ChatCompletion): - return ret - - response = Response( - id=FAKE_RESPONSES_ID, - created_at=time.time(), - model=self.model, - object="response", - output=[], - tool_choice=cast(Literal["auto", "required", "none"], tool_choice) - if tool_choice != NOT_GIVEN - else "auto", - top_p=model_settings.top_p, - temperature=model_settings.temperature, - tools=[], - parallel_tool_calls=parallel_tool_calls or False, - reasoning=model_settings.reasoning, - ) - return response, ret - - def _get_client(self) -> AsyncOpenAI: - if self._client is None: - self._client = AsyncOpenAI() - return self._client - - -class _Converter: - - @classmethod - def is_openai(cls, client: AsyncOpenAI): - return str(client.base_url).startswith("https://api.openai.com") - - @classmethod - def get_store_param(cls, client: AsyncOpenAI, model_settings: ModelSettings) -> bool | None: - # Match the behavior of Responses where store is True when not given - default_store = True if cls.is_openai(client) else None - return model_settings.store if model_settings.store is not None else default_store - - @classmethod - def get_stream_options_param( - cls, client: AsyncOpenAI, model_settings: ModelSettings - ) -> dict[str, bool] | None: - default_include_usage = True if cls.is_openai(client) else None - include_usage = model_settings.include_usage if model_settings.include_usage is not None \ - else default_include_usage - stream_options = {"include_usage": include_usage} if include_usage is not None else None - return stream_options - - @classmethod - def convert_tool_choice( - cls, tool_choice: Literal["auto", "required", "none"] | str | None - ) -> ChatCompletionToolChoiceOptionParam | NotGiven: - if tool_choice is None: - return NOT_GIVEN - elif tool_choice == "auto": - return "auto" - elif tool_choice == "required": - return "required" - elif tool_choice == "none": - return "none" - else: - return { - "type": "function", - "function": { - "name": tool_choice, - }, - } - - @classmethod - def convert_response_format( - cls, final_output_schema: AgentOutputSchema | None - ) -> ResponseFormat | NotGiven: - if not final_output_schema or final_output_schema.is_plain_text(): - return NOT_GIVEN - - return { - "type": "json_schema", - "json_schema": { - "name": "final_output", - "strict": final_output_schema.strict_json_schema, - "schema": final_output_schema.json_schema(), - }, - } - - @classmethod - def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]: - items: list[TResponseOutputItem] = [] - - message_item = ResponseOutputMessage( - id=FAKE_RESPONSES_ID, - content=[], - role="assistant", - type="message", - status="completed", - ) - if message.content: - message_item.content.append( - ResponseOutputText(text=message.content, type="output_text", annotations=[]) - ) - if message.refusal: - message_item.content.append( - ResponseOutputRefusal(refusal=message.refusal, 
type="refusal") - ) - if message.audio: - raise AgentsException("Audio is not currently supported") - - if message_item.content: - items.append(message_item) - - if message.tool_calls: - for tool_call in message.tool_calls: - items.append( - ResponseFunctionToolCall( - id=FAKE_RESPONSES_ID, - call_id=tool_call.id, - arguments=tool_call.function.arguments, - name=tool_call.function.name, - type="function_call", - ) - ) - - return items - - @classmethod - def maybe_easy_input_message(cls, item: Any) -> EasyInputMessageParam | None: - if not isinstance(item, dict): - return None - - keys = item.keys() - # EasyInputMessageParam only has these two keys - if keys != {"content", "role"}: - return None - - role = item.get("role", None) - if role not in ("user", "assistant", "system", "developer"): - return None - - if "content" not in item: - return None - - return cast(EasyInputMessageParam, item) - - @classmethod - def maybe_input_message(cls, item: Any) -> Message | None: - if ( - isinstance(item, dict) - and item.get("type") == "message" - and item.get("role") - in ( - "user", - "system", - "developer", - ) - ): - return cast(Message, item) - - return None - - @classmethod - def maybe_file_search_call(cls, item: Any) -> ResponseFileSearchToolCallParam | None: - if isinstance(item, dict) and item.get("type") == "file_search_call": - return cast(ResponseFileSearchToolCallParam, item) - return None - - @classmethod - def maybe_function_tool_call(cls, item: Any) -> ResponseFunctionToolCallParam | None: - if isinstance(item, dict) and item.get("type") == "function_call": - return cast(ResponseFunctionToolCallParam, item) - return None - - @classmethod - def maybe_function_tool_call_output( - cls, - item: Any, - ) -> FunctionCallOutput | None: - if isinstance(item, dict) and item.get("type") == "function_call_output": - return cast(FunctionCallOutput, item) - return None - - @classmethod - def maybe_item_reference(cls, item: Any) -> ItemReference | None: - if isinstance(item, dict) and item.get("type") == "item_reference": - return cast(ItemReference, item) - return None - - @classmethod - def maybe_response_output_message(cls, item: Any) -> ResponseOutputMessageParam | None: - # ResponseOutputMessage is only used for messages with role assistant - if ( - isinstance(item, dict) - and item.get("type") == "message" - and item.get("role") == "assistant" - ): - return cast(ResponseOutputMessageParam, item) - return None - - @classmethod - def extract_text_content( - cls, content: str | Iterable[ResponseInputContentParam] - ) -> str | list[ChatCompletionContentPartTextParam]: - all_content = cls.extract_all_content(content) - if isinstance(all_content, str): - return all_content - out: list[ChatCompletionContentPartTextParam] = [] - for c in all_content: - if c.get("type") == "text": - out.append(cast(ChatCompletionContentPartTextParam, c)) - return out - - @classmethod - def extract_all_content( - cls, content: str | Iterable[ResponseInputContentParam] - ) -> str | list[ChatCompletionContentPartParam]: - if isinstance(content, str): - return content - out: list[ChatCompletionContentPartParam] = [] - - for c in content: - if isinstance(c, dict) and c.get("type") == "input_text": - casted_text_param = cast(ResponseInputTextParam, c) - out.append( - ChatCompletionContentPartTextParam( - type="text", - text=casted_text_param["text"], - ) - ) - elif isinstance(c, dict) and c.get("type") == "input_image": - casted_image_param = cast(ResponseInputImageParam, c) - if "image_url" not in casted_image_param or not 
casted_image_param["image_url"]: - raise UserError( - f"Only image URLs are supported for input_image {casted_image_param}" - ) - out.append( - ChatCompletionContentPartImageParam( - type="image_url", - image_url={ - "url": casted_image_param["image_url"], - "detail": casted_image_param["detail"], - }, - ) - ) - elif isinstance(c, dict) and c.get("type") == "input_file": - raise UserError(f"File uploads are not supported for chat completions {c}") - else: - raise UserError(f"Unknown content: {c}") - return out - - @classmethod - def items_to_messages( - cls, - items: str | Iterable[TResponseInputItem], - ) -> list[ChatCompletionMessageParam]: - """ - Convert a sequence of 'Item' objects into a list of ChatCompletionMessageParam. - - Rules: - - EasyInputMessage or InputMessage (role=user) => ChatCompletionUserMessageParam - - EasyInputMessage or InputMessage (role=system) => ChatCompletionSystemMessageParam - - EasyInputMessage or InputMessage (role=developer) => ChatCompletionDeveloperMessageParam - - InputMessage (role=assistant) => Start or flush a ChatCompletionAssistantMessageParam - - response_output_message => Also produces/flushes a ChatCompletionAssistantMessageParam - - tool calls get attached to the *current* assistant message, or create one if none. - - tool outputs => ChatCompletionToolMessageParam - """ - - if isinstance(items, str): - return [ - ChatCompletionUserMessageParam( - role="user", - content=items, - ) - ] - - result: list[ChatCompletionMessageParam] = [] - current_assistant_msg: ChatCompletionAssistantMessageParam | None = None - - def flush_assistant_message() -> None: - nonlocal current_assistant_msg - if current_assistant_msg is not None: - # The API doesn't support empty arrays for tool_calls - if not current_assistant_msg.get("tool_calls"): - del current_assistant_msg["tool_calls"] - result.append(current_assistant_msg) - current_assistant_msg = None - - def ensure_assistant_message() -> ChatCompletionAssistantMessageParam: - nonlocal current_assistant_msg - if current_assistant_msg is None: - current_assistant_msg = ChatCompletionAssistantMessageParam(role="assistant") - current_assistant_msg["tool_calls"] = [] - return current_assistant_msg - - for item in items: - # 1) Check easy input message - if easy_msg := cls.maybe_easy_input_message(item): - role = easy_msg["role"] - content = easy_msg["content"] - - if role == "user": - flush_assistant_message() - msg_user: ChatCompletionUserMessageParam = { - "role": "user", - "content": cls.extract_all_content(content), - } - result.append(msg_user) - elif role == "system": - flush_assistant_message() - msg_system: ChatCompletionSystemMessageParam = { - "role": "system", - "content": cls.extract_text_content(content), - } - result.append(msg_system) - elif role == "developer": - flush_assistant_message() - msg_developer: ChatCompletionDeveloperMessageParam = { - "role": "developer", - "content": cls.extract_text_content(content), - } - result.append(msg_developer) - elif role == "assistant": - flush_assistant_message() - msg_assistant: ChatCompletionAssistantMessageParam = { - "role": "assistant", - "content": cls.extract_text_content(content), - } - result.append(msg_assistant) - else: - raise UserError(f"Unexpected role in easy_input_message: {role}") - - # 2) Check input message - elif in_msg := cls.maybe_input_message(item): - role = in_msg["role"] - content = in_msg["content"] - flush_assistant_message() - - if role == "user": - msg_user = { - "role": "user", - "content": cls.extract_all_content(content), - } 
- result.append(msg_user) - elif role == "system": - msg_system = { - "role": "system", - "content": cls.extract_text_content(content), - } - result.append(msg_system) - elif role == "developer": - msg_developer = { - "role": "developer", - "content": cls.extract_text_content(content), - } - result.append(msg_developer) - else: - raise UserError(f"Unexpected role in input_message: {role}") - - # 3) response output message => assistant - elif resp_msg := cls.maybe_response_output_message(item): - flush_assistant_message() - new_asst = ChatCompletionAssistantMessageParam(role="assistant") - contents = resp_msg["content"] - - text_segments = [] - for c in contents: - if c["type"] == "output_text": - text_segments.append(c["text"]) - elif c["type"] == "refusal": - new_asst["refusal"] = c["refusal"] - elif c["type"] == "output_audio": - # Can't handle this, b/c chat completions expects an ID which we dont have - raise UserError( - f"Only audio IDs are supported for chat completions, but got: {c}" - ) - else: - raise UserError(f"Unknown content type in ResponseOutputMessage: {c}") - - if text_segments: - combined = "\n".join(text_segments) - new_asst["content"] = combined - - new_asst["tool_calls"] = [] - current_assistant_msg = new_asst - - # 4) function/file-search calls => attach to assistant - elif file_search := cls.maybe_file_search_call(item): - asst = ensure_assistant_message() - tool_calls = list(asst.get("tool_calls", [])) - new_tool_call = ChatCompletionMessageToolCallParam( - id=file_search["id"], - type="function", - function={ - "name": "file_search_call", - "arguments": json.dumps( - { - "queries": file_search.get("queries", []), - "status": file_search.get("status"), - } - ), - }, - ) - tool_calls.append(new_tool_call) - asst["tool_calls"] = tool_calls - - elif func_call := cls.maybe_function_tool_call(item): - asst = ensure_assistant_message() - tool_calls = list(asst.get("tool_calls", [])) - arguments = func_call["arguments"] if func_call["arguments"] else "{}" - new_tool_call = ChatCompletionMessageToolCallParam( - id=func_call["call_id"], - type="function", - function={ - "name": func_call["name"], - "arguments": arguments, - }, - ) - tool_calls.append(new_tool_call) - asst["tool_calls"] = tool_calls - # 5) function call output => tool message - elif func_output := cls.maybe_function_tool_call_output(item): - flush_assistant_message() - msg: ChatCompletionToolMessageParam = { - "role": "tool", - "tool_call_id": func_output["call_id"], - "content": func_output["output"], - } - result.append(msg) - - # 6) item reference => handle or raise - elif item_ref := cls.maybe_item_reference(item): - raise UserError( - f"Encountered an item_reference, which is not supported: {item_ref}" - ) - - # 7) If we haven't recognized it => fail or ignore - else: - raise UserError(f"Unhandled item type or structure: {item}") - - flush_assistant_message() - return result - - -class ToolConverter: - @classmethod - def to_openai(cls, tool: Tool) -> ChatCompletionToolParam: - if isinstance(tool, FunctionTool): - return { - "type": "function", - "function": { - "name": tool.name, - "description": tool.description or "", - "parameters": tool.params_json_schema, - }, - } - - raise UserError( - f"Hosted tools are not supported with the ChatCompletions API. 
Got tool type: " - f"{type(tool)}, tool: {tool}" - ) - - @classmethod - def convert_handoff_tool(cls, handoff: Handoff[Any]) -> ChatCompletionToolParam: - return { - "type": "function", - "function": { - "name": handoff.tool_name, - "description": handoff.tool_description, - "parameters": handoff.input_json_schema, - }, - } diff --git a/src/agents/models/openai_provider.py b/src/agents/models/openai_provider.py deleted file mode 100644 index e7e922ab..00000000 --- a/src/agents/models/openai_provider.py +++ /dev/null @@ -1,91 +0,0 @@ -from __future__ import annotations - -import httpx -from openai import AsyncOpenAI, DefaultAsyncHttpxClient - -from . import _openai_shared -from .interface import Model, ModelProvider -from .openai_chatcompletions import OpenAIChatCompletionsModel -from .openai_responses import OpenAIResponsesModel - -DEFAULT_MODEL: str = "gpt-4o" - - -_http_client: httpx.AsyncClient | None = None - - -# If we create a new httpx client for each request, that would mean no sharing of connection pools, -# which would mean worse latency and resource usage. So, we share the client across requests. -def shared_http_client() -> httpx.AsyncClient: - global _http_client - if _http_client is None: - _http_client = DefaultAsyncHttpxClient() - return _http_client - - -class OpenAIProvider(ModelProvider): - def __init__( - self, - *, - api_key: str | None = None, - base_url: str | None = None, - openai_client: AsyncOpenAI | None = None, - organization: str | None = None, - project: str | None = None, - use_responses: bool | None = None, - ) -> None: - """Create a new OpenAI provider. - - Args: - api_key: The API key to use for the OpenAI client. If not provided, we will use the - default API key. - base_url: The base URL to use for the OpenAI client. If not provided, we will use the - default base URL. - openai_client: An optional OpenAI client to use. If not provided, we will create a new - OpenAI client using the api_key and base_url. - organization: The organization to use for the OpenAI client. - project: The project to use for the OpenAI client. - use_responses: Whether to use the OpenAI responses API. - """ - if openai_client is not None: - assert api_key is None and base_url is None, ( - "Don't provide api_key or base_url if you provide openai_client" - ) - self._client: AsyncOpenAI | None = openai_client - else: - self._client = None - self._stored_api_key = api_key - self._stored_base_url = base_url - self._stored_organization = organization - self._stored_project = project - - if use_responses is not None: - self._use_responses = use_responses - else: - self._use_responses = _openai_shared.get_use_responses_by_default() - - # We lazy load the client in case you never actually use OpenAIProvider(). Otherwise - # AsyncOpenAI() raises an error if you don't have an API key set. 
- def _get_client(self) -> AsyncOpenAI: - if self._client is None: - self._client = _openai_shared.get_default_openai_client() or AsyncOpenAI( - api_key=self._stored_api_key or _openai_shared.get_default_openai_key(), - base_url=self._stored_base_url, - organization=self._stored_organization, - project=self._stored_project, - http_client=shared_http_client(), - ) - - return self._client - - def get_model(self, model_name: str | None) -> Model: - if model_name is None: - model_name = DEFAULT_MODEL - - client = self._get_client() - - return ( - OpenAIResponsesModel(model=model_name, openai_client=client) - if self._use_responses - else OpenAIChatCompletionsModel(model=model_name, openai_client=client) - ) diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py deleted file mode 100644 index 06828884..00000000 --- a/src/agents/models/openai_responses.py +++ /dev/null @@ -1,393 +0,0 @@ -from __future__ import annotations - -import json -from collections.abc import AsyncIterator -from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Literal, overload - -from openai import NOT_GIVEN, APIStatusError, AsyncOpenAI, AsyncStream, NotGiven -from openai.types import ChatModel -from openai.types.responses import ( - Response, - ResponseCompletedEvent, - ResponseStreamEvent, - ResponseTextConfigParam, - ToolParam, - WebSearchToolParam, - response_create_params, -) - -from .. import _debug -from ..agent_output import AgentOutputSchema -from ..exceptions import UserError -from ..handoffs import Handoff -from ..items import ItemHelpers, ModelResponse, TResponseInputItem -from ..logger import logger -from ..tool import ComputerTool, FileSearchTool, FunctionTool, Tool, WebSearchTool -from ..tracing import SpanError, response_span -from ..usage import Usage -from ..version import __version__ -from .interface import Model, ModelTracing - -if TYPE_CHECKING: - from ..model_settings import ModelSettings - - -_USER_AGENT = f"Agents/Python {__version__}" -_HEADERS = {"User-Agent": _USER_AGENT} - -# From the Responses API -IncludeLiteral = Literal[ - "file_search_call.results", - "message.input_image.image_url", - "computer_call_output.output.image_url", -] - - -class OpenAIResponsesModel(Model): - """ - Implementation of `Model` that uses the OpenAI Responses API. 
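A usage sketch for the removed provider, which resolves a model name to either the Responses or the Chat Completions implementation. OPENAI_API_KEY is assumed to be set, since the underlying client is created lazily on first use.

from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
from agents.models.openai_provider import OpenAIProvider

provider = OpenAIProvider(use_responses=False)  # force the Chat Completions implementation
model = provider.get_model(None)                # None falls back to DEFAULT_MODEL ("gpt-4o")
assert isinstance(model, OpenAIChatCompletionsModel)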
- """ - - def __init__( - self, - model: str | ChatModel, - openai_client: AsyncOpenAI, - ) -> None: - self.model = model - self._client = openai_client - - def _non_null_or_not_given(self, value: Any) -> Any: - return value if value is not None else NOT_GIVEN - - async def get_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - tracing: ModelTracing, - ) -> ModelResponse: - with response_span(disabled=tracing.is_disabled()) as span_response: - try: - response = await self._fetch_response( - system_instructions, - input, - model_settings, - tools, - output_schema, - handoffs, - stream=False, - ) - - if _debug.DONT_LOG_MODEL_DATA: - logger.debug("LLM responded") - else: - logger.debug( - "LLM resp:\n" - f"{json.dumps([x.model_dump() for x in response.output], indent=2)}\n" - ) - - usage = ( - Usage( - requests=1, - input_tokens=response.usage.input_tokens, - output_tokens=response.usage.output_tokens, - total_tokens=response.usage.total_tokens, - ) - if response.usage - else Usage() - ) - - if tracing.include_data(): - span_response.span_data.response = response - span_response.span_data.input = input - except Exception as e: - span_response.set_error( - SpanError( - message="Error getting response", - data={ - "error": str(e) if tracing.include_data() else e.__class__.__name__, - }, - ) - ) - request_id = e.request_id if isinstance(e, APIStatusError) else None - logger.error(f"Error getting response: {e}. (request_id: {request_id})") - raise - - return ModelResponse( - output=response.output, - usage=usage, - referenceable_id=response.id, - ) - - async def stream_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - tracing: ModelTracing, - ) -> AsyncIterator[ResponseStreamEvent]: - """ - Yields a partial message as it is generated, as well as the usage information. - """ - with response_span(disabled=tracing.is_disabled()) as span_response: - try: - stream = await self._fetch_response( - system_instructions, - input, - model_settings, - tools, - output_schema, - handoffs, - stream=True, - ) - - final_response: Response | None = None - - async for chunk in stream: - if isinstance(chunk, ResponseCompletedEvent): - final_response = chunk.response - yield chunk - - if final_response and tracing.include_data(): - span_response.span_data.response = final_response - span_response.span_data.input = input - - except Exception as e: - span_response.set_error( - SpanError( - message="Error streaming response", - data={ - "error": str(e) if tracing.include_data() else e.__class__.__name__, - }, - ) - ) - logger.error(f"Error streaming response: {e}") - raise - - @overload - async def _fetch_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - stream: Literal[True], - ) -> AsyncStream[ResponseStreamEvent]: ... - - @overload - async def _fetch_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - stream: Literal[False], - ) -> Response: ... 
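Streaming from the removed Responses model mirrors the non-streaming call; a sketch, again assuming OPENAI_API_KEY is set and with an arbitrary prompt:

import asyncio

from openai import AsyncOpenAI
from openai.types.responses import ResponseTextDeltaEvent

from agents.model_settings import ModelSettings
from agents.models.interface import ModelTracing
from agents.models.openai_responses import OpenAIResponsesModel


async def main() -> None:
    model = OpenAIResponsesModel(model="gpt-4o", openai_client=AsyncOpenAI())
    async for event in model.stream_response(
        system_instructions=None,
        input="Write a haiku about the sea.",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
    ):
        if isinstance(event, ResponseTextDeltaEvent):
            print(event.delta, end="", flush=True)


asyncio.run(main())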
- - async def _fetch_response( - self, - system_instructions: str | None, - input: str | list[TResponseInputItem], - model_settings: ModelSettings, - tools: list[Tool], - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - stream: Literal[True] | Literal[False] = False, - ) -> Response | AsyncStream[ResponseStreamEvent]: - list_input = ItemHelpers.input_to_new_input_list(input) - - parallel_tool_calls = ( - True - if model_settings.parallel_tool_calls and tools and len(tools) > 0 - else False - if model_settings.parallel_tool_calls is False - else NOT_GIVEN - ) - - tool_choice = Converter.convert_tool_choice(model_settings.tool_choice) - converted_tools = Converter.convert_tools(tools, handoffs) - response_format = Converter.get_response_format(output_schema) - - if _debug.DONT_LOG_MODEL_DATA: - logger.debug("Calling LLM") - else: - logger.debug( - f"Calling LLM {self.model} with input:\n" - f"{json.dumps(list_input, indent=2)}\n" - f"Tools:\n{json.dumps(converted_tools.tools, indent=2)}\n" - f"Stream: {stream}\n" - f"Tool choice: {tool_choice}\n" - f"Response format: {response_format}\n" - ) - - return await self._client.responses.create( - instructions=self._non_null_or_not_given(system_instructions), - model=self.model, - input=list_input, - include=converted_tools.includes, - tools=converted_tools.tools, - temperature=self._non_null_or_not_given(model_settings.temperature), - top_p=self._non_null_or_not_given(model_settings.top_p), - truncation=self._non_null_or_not_given(model_settings.truncation), - max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens), - tool_choice=tool_choice, - parallel_tool_calls=parallel_tool_calls, - stream=stream, - extra_headers=_HEADERS, - text=response_format, - store=self._non_null_or_not_given(model_settings.store), - reasoning=self._non_null_or_not_given(model_settings.reasoning), - metadata=self._non_null_or_not_given(model_settings.metadata) - ) - - def _get_client(self) -> AsyncOpenAI: - if self._client is None: - self._client = AsyncOpenAI() - return self._client - - -@dataclass -class ConvertedTools: - tools: list[ToolParam] - includes: list[IncludeLiteral] - - -class Converter: - @classmethod - def convert_tool_choice( - cls, tool_choice: Literal["auto", "required", "none"] | str | None - ) -> response_create_params.ToolChoice | NotGiven: - if tool_choice is None: - return NOT_GIVEN - elif tool_choice == "required": - return "required" - elif tool_choice == "auto": - return "auto" - elif tool_choice == "none": - return "none" - elif tool_choice == "file_search": - return { - "type": "file_search", - } - elif tool_choice == "web_search_preview": - return { - "type": "web_search_preview", - } - elif tool_choice == "computer_use_preview": - return { - "type": "computer_use_preview", - } - else: - return { - "type": "function", - "name": tool_choice, - } - - @classmethod - def get_response_format( - cls, output_schema: AgentOutputSchema | None - ) -> ResponseTextConfigParam | NotGiven: - if output_schema is None or output_schema.is_plain_text(): - return NOT_GIVEN - else: - return { - "format": { - "type": "json_schema", - "name": "final_output", - "schema": output_schema.json_schema(), - "strict": output_schema.strict_json_schema, - } - } - - @classmethod - def convert_tools( - cls, - tools: list[Tool], - handoffs: list[Handoff[Any]], - ) -> ConvertedTools: - converted_tools: list[ToolParam] = [] - includes: list[IncludeLiteral] = [] - - computer_tools = [tool for tool in tools if isinstance(tool, ComputerTool)] - 
if len(computer_tools) > 1: - raise UserError(f"You can only provide one computer tool. Got {len(computer_tools)}") - - for tool in tools: - converted_tool, include = cls._convert_tool(tool) - converted_tools.append(converted_tool) - if include: - includes.append(include) - - for handoff in handoffs: - converted_tools.append(cls._convert_handoff_tool(handoff)) - - return ConvertedTools(tools=converted_tools, includes=includes) - - @classmethod - def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, IncludeLiteral | None]: - """Returns converted tool and includes""" - - if isinstance(tool, FunctionTool): - converted_tool: ToolParam = { - "name": tool.name, - "parameters": tool.params_json_schema, - "strict": tool.strict_json_schema, - "type": "function", - "description": tool.description, - } - includes: IncludeLiteral | None = None - elif isinstance(tool, WebSearchTool): - ws: WebSearchToolParam = { - "type": "web_search_preview", - "user_location": tool.user_location, - "search_context_size": tool.search_context_size, - } - converted_tool = ws - includes = None - elif isinstance(tool, FileSearchTool): - converted_tool = { - "type": "file_search", - "vector_store_ids": tool.vector_store_ids, - } - if tool.max_num_results: - converted_tool["max_num_results"] = tool.max_num_results - if tool.ranking_options: - converted_tool["ranking_options"] = tool.ranking_options - if tool.filters: - converted_tool["filters"] = tool.filters - - includes = "file_search_call.results" if tool.include_search_results else None - elif isinstance(tool, ComputerTool): - converted_tool = { - "type": "computer_use_preview", - "environment": tool.computer.environment, - "display_width": tool.computer.dimensions[0], - "display_height": tool.computer.dimensions[1], - } - includes = None - - else: - raise UserError(f"Unknown tool type: {type(tool)}, tool") - - return converted_tool, includes - - @classmethod - def _convert_handoff_tool(cls, handoff: Handoff) -> ToolParam: - return { - "name": handoff.tool_name, - "parameters": handoff.input_json_schema, - "strict": handoff.strict_json_schema, - "type": "function", - "description": handoff.tool_description, - } diff --git a/src/agents/py.typed b/src/agents/py.typed deleted file mode 100644 index 8b137891..00000000 --- a/src/agents/py.typed +++ /dev/null @@ -1 +0,0 @@ - diff --git a/src/agents/result.py b/src/agents/result.py deleted file mode 100644 index 40a64806..00000000 --- a/src/agents/result.py +++ /dev/null @@ -1,225 +0,0 @@ -from __future__ import annotations - -import abc -import asyncio -from collections.abc import AsyncIterator -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, cast - -from typing_extensions import TypeVar - -from ._run_impl import QueueCompleteSentinel -from .agent import Agent -from .agent_output import AgentOutputSchema -from .exceptions import InputGuardrailTripwireTriggered, MaxTurnsExceeded -from .guardrail import InputGuardrailResult, OutputGuardrailResult -from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem -from .logger import logger -from .stream_events import StreamEvent -from .tracing import Trace -from .util._pretty_print import pretty_print_result, pretty_print_run_result_streaming - -if TYPE_CHECKING: - from ._run_impl import QueueCompleteSentinel - from .agent import Agent - -T = TypeVar("T") - - -@dataclass -class RunResultBase(abc.ABC): - input: str | list[TResponseInputItem] - """The original input items i.e. the items before run() was called. 
This may be a mutated - version of the input, if there are handoff input filters that mutate the input. - """ - - new_items: list[RunItem] - """The new items generated during the agent run. These include things like new messages, tool - calls and their outputs, etc. - """ - - raw_responses: list[ModelResponse] - """The raw LLM responses generated by the model during the agent run.""" - - final_output: Any - """The output of the last agent.""" - - input_guardrail_results: list[InputGuardrailResult] - """Guardrail results for the input messages.""" - - output_guardrail_results: list[OutputGuardrailResult] - """Guardrail results for the final output of the agent.""" - - @property - @abc.abstractmethod - def last_agent(self) -> Agent[Any]: - """The last agent that was run.""" - - def final_output_as(self, cls: type[T], raise_if_incorrect_type: bool = False) -> T: - """A convenience method to cast the final output to a specific type. By default, the cast - is only for the typechecker. If you set `raise_if_incorrect_type` to True, we'll raise a - TypeError if the final output is not of the given type. - - Args: - cls: The type to cast the final output to. - raise_if_incorrect_type: If True, we'll raise a TypeError if the final output is not of - the given type. - - Returns: - The final output casted to the given type. - """ - if raise_if_incorrect_type and not isinstance(self.final_output, cls): - raise TypeError(f"Final output is not of type {cls.__name__}") - - return cast(T, self.final_output) - - def to_input_list(self) -> list[TResponseInputItem]: - """Creates a new input list, merging the original input with all the new items generated.""" - original_items: list[TResponseInputItem] = ItemHelpers.input_to_new_input_list(self.input) - new_items = [item.to_input_item() for item in self.new_items] - - return original_items + new_items - - -@dataclass -class RunResult(RunResultBase): - _last_agent: Agent[Any] - - @property - def last_agent(self) -> Agent[Any]: - """The last agent that was run.""" - return self._last_agent - - def __str__(self) -> str: - return pretty_print_result(self) - - -@dataclass -class RunResultStreaming(RunResultBase): - """The result of an agent run in streaming mode. You can use the `stream_events` method to - receive semantic events as they are generated. - - The streaming method will raise: - - A MaxTurnsExceeded exception if the agent exceeds the max_turns limit. - - A GuardrailTripwireTriggered exception if a guardrail is tripped. - """ - - current_agent: Agent[Any] - """The current agent that is running.""" - - current_turn: int - """The current turn number.""" - - max_turns: int - """The maximum number of turns the agent can run for.""" - - final_output: Any - """The final output of the agent. 
This is None until the agent has finished running.""" - - _current_agent_output_schema: AgentOutputSchema | None = field(repr=False) - - _trace: Trace | None = field(repr=False) - - is_complete: bool = False - """Whether the agent has finished running.""" - - # Queues that the background run_loop writes to - _event_queue: asyncio.Queue[StreamEvent | QueueCompleteSentinel] = field( - default_factory=asyncio.Queue, repr=False - ) - _input_guardrail_queue: asyncio.Queue[InputGuardrailResult] = field( - default_factory=asyncio.Queue, repr=False - ) - - # Store the asyncio tasks that we're waiting on - _run_impl_task: asyncio.Task[Any] | None = field(default=None, repr=False) - _input_guardrails_task: asyncio.Task[Any] | None = field(default=None, repr=False) - _output_guardrails_task: asyncio.Task[Any] | None = field(default=None, repr=False) - _stored_exception: Exception | None = field(default=None, repr=False) - - @property - def last_agent(self) -> Agent[Any]: - """The last agent that was run. Updates as the agent run progresses, so the true last agent - is only available after the agent run is complete. - """ - return self.current_agent - - async def stream_events(self) -> AsyncIterator[StreamEvent]: - """Stream deltas for new items as they are generated. We're using the types from the - OpenAI Responses API, so these are semantic events: each event has a `type` field that - describes the type of the event, along with the data for that event. - - This will raise: - - A MaxTurnsExceeded exception if the agent exceeds the max_turns limit. - - A GuardrailTripwireTriggered exception if a guardrail is tripped. - """ - while True: - self._check_errors() - if self._stored_exception: - logger.debug("Breaking due to stored exception") - self.is_complete = True - break - - if self.is_complete and self._event_queue.empty(): - break - - try: - item = await self._event_queue.get() - except asyncio.CancelledError: - break - - if isinstance(item, QueueCompleteSentinel): - self._event_queue.task_done() - # Check for errors, in case the queue was completed due to an exception - self._check_errors() - break - - yield item - self._event_queue.task_done() - - if self._trace: - self._trace.finish(reset_current=True) - - self._cleanup_tasks() - - if self._stored_exception: - raise self._stored_exception - - def _check_errors(self): - if self.current_turn > self.max_turns: - self._stored_exception = MaxTurnsExceeded(f"Max turns ({self.max_turns}) exceeded") - - # Fetch all the completed guardrail results from the queue and raise if needed - while not self._input_guardrail_queue.empty(): - guardrail_result = self._input_guardrail_queue.get_nowait() - if guardrail_result.output.tripwire_triggered: - self._stored_exception = InputGuardrailTripwireTriggered(guardrail_result) - - # Check the tasks for any exceptions - if self._run_impl_task and self._run_impl_task.done(): - exc = self._run_impl_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - if self._input_guardrails_task and self._input_guardrails_task.done(): - exc = self._input_guardrails_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - if self._output_guardrails_task and self._output_guardrails_task.done(): - exc = self._output_guardrails_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - def _cleanup_tasks(self): - if self._run_impl_task and not self._run_impl_task.done(): - self._run_impl_task.cancel() - - if 
self._input_guardrails_task and not self._input_guardrails_task.done(): - self._input_guardrails_task.cancel() - - if self._output_guardrails_task and not self._output_guardrails_task.done(): - self._output_guardrails_task.cancel() - - def __str__(self) -> str: - return pretty_print_run_result_streaming(self) diff --git a/src/agents/run.py b/src/agents/run.py deleted file mode 100644 index 0159822a..00000000 --- a/src/agents/run.py +++ /dev/null @@ -1,942 +0,0 @@ -from __future__ import annotations - -import asyncio -import copy -from dataclasses import dataclass, field -from typing import Any, cast - -from openai.types.responses import ResponseCompletedEvent - -from ._run_impl import ( - AgentToolUseTracker, - NextStepFinalOutput, - NextStepHandoff, - NextStepRunAgain, - QueueCompleteSentinel, - RunImpl, - SingleStepResult, - TraceCtxManager, - get_model_tracing_impl, -) -from .agent import Agent -from .agent_output import AgentOutputSchema -from .exceptions import ( - AgentsException, - InputGuardrailTripwireTriggered, - MaxTurnsExceeded, - ModelBehaviorError, - OutputGuardrailTripwireTriggered, -) -from .guardrail import InputGuardrail, InputGuardrailResult, OutputGuardrail, OutputGuardrailResult -from .handoffs import Handoff, HandoffInputFilter, handoff -from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem -from .lifecycle import RunHooks -from .logger import logger -from .model_settings import ModelSettings -from .models.interface import Model, ModelProvider -from .models.openai_provider import OpenAIProvider -from .result import RunResult, RunResultStreaming -from .run_context import RunContextWrapper, TContext -from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent -from .tool import Tool -from .tracing import Span, SpanError, agent_span, get_current_trace, trace -from .tracing.span_data import AgentSpanData -from .usage import Usage -from .util import _coro, _error_tracing - -DEFAULT_MAX_TURNS = 10 - - -@dataclass -class RunConfig: - """Configures settings for the entire agent run.""" - - model: str | Model | None = None - """The model to use for the entire agent run. If set, will override the model set on every - agent. The model_provider passed in below must be able to resolve this model name. - """ - - model_provider: ModelProvider = field(default_factory=OpenAIProvider) - """The model provider to use when looking up string model names. Defaults to OpenAI.""" - - model_settings: ModelSettings | None = None - """Configure global model settings. Any non-null values will override the agent-specific model - settings. - """ - - handoff_input_filter: HandoffInputFilter | None = None - """A global input filter to apply to all handoffs. If `Handoff.input_filter` is set, then that - will take precedence. The input filter allows you to edit the inputs that are sent to the new - agent. See the documentation in `Handoff.input_filter` for more details. - """ - - input_guardrails: list[InputGuardrail[Any]] | None = None - """A list of input guardrails to run on the initial run input.""" - - output_guardrails: list[OutputGuardrail[Any]] | None = None - """A list of output guardrails to run on the final output of the run.""" - - tracing_disabled: bool = False - """Whether tracing is disabled for the agent run. If disabled, we will not trace the agent run. - """ - - trace_include_sensitive_data: bool = True - """Whether we include potentially sensitive data (for example: inputs/outputs of tool calls or - LLM generations) in traces. 
If False, we'll still create spans for these events, but the - sensitive data will not be included. - """ - - workflow_name: str = "Agent workflow" - """The name of the run, used for tracing. Should be a logical name for the run, like - "Code generation workflow" or "Customer support agent". - """ - - trace_id: str | None = None - """A custom trace ID to use for tracing. If not provided, we will generate a new trace ID.""" - - group_id: str | None = None - """ - A grouping identifier to use for tracing, to link multiple traces from the same conversation - or process. For example, you might use a chat thread ID. - """ - - trace_metadata: dict[str, Any] | None = None - """ - An optional dictionary of additional metadata to include with the trace. - """ - - -class Runner: - @classmethod - async def run( - cls, - starting_agent: Agent[TContext], - input: str | list[TResponseInputItem], - *, - context: TContext | None = None, - max_turns: int = DEFAULT_MAX_TURNS, - hooks: RunHooks[TContext] | None = None, - run_config: RunConfig | None = None, - ) -> RunResult: - """Run a workflow starting at the given agent. The agent will run in a loop until a final - output is generated. The loop runs like so: - 1. The agent is invoked with the given input. - 2. If there is a final output (i.e. the agent produces something of type - `agent.output_type`, the loop terminates. - 3. If there's a handoff, we run the loop again, with the new agent. - 4. Else, we run tool calls (if any), and re-run the loop. - - In two cases, the agent may raise an exception: - 1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised. - 2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised. - - Note that only the first agent's input guardrails are run. - - Args: - starting_agent: The starting agent to run. - input: The initial input to the agent. You can pass a single string for a user message, - or a list of input items. - context: The context to run the agent with. - max_turns: The maximum number of turns to run the agent for. A turn is defined as one - AI invocation (including any tool calls that might occur). - hooks: An object that receives callbacks on various lifecycle events. - run_config: Global settings for the entire agent run. - - Returns: - A run result containing all the inputs, guardrail results and the output of the last - agent. Agents may perform handoffs, so we don't know the specific type of the output. - """ - if hooks is None: - hooks = RunHooks[Any]() - if run_config is None: - run_config = RunConfig() - - tool_use_tracker = AgentToolUseTracker() - - with TraceCtxManager( - workflow_name=run_config.workflow_name, - trace_id=run_config.trace_id, - group_id=run_config.group_id, - metadata=run_config.trace_metadata, - disabled=run_config.tracing_disabled, - ): - current_turn = 0 - original_input: str | list[TResponseInputItem] = copy.deepcopy(input) - generated_items: list[RunItem] = [] - model_responses: list[ModelResponse] = [] - - context_wrapper: RunContextWrapper[TContext] = RunContextWrapper( - context=context, # type: ignore - ) - - input_guardrail_results: list[InputGuardrailResult] = [] - - current_span: Span[AgentSpanData] | None = None - current_agent = starting_agent - should_run_agent_start_hooks = True - - try: - while True: - # Start an agent span if we don't have one. This span is ended if the current - # agent changes, or if the agent loop ends. 
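A hedged usage sketch of the `Runner.run` entry point documented above; only `Runner` and `RunConfig` come from this module, and the `Agent(...)` constructor arguments are an assumption (the agent class is defined outside this file):

    import asyncio

    from agents.agent import Agent
    from agents.run import RunConfig, Runner

    async def main() -> None:
        # Assumed Agent signature; not shown in this diff.
        agent = Agent(name="Assistant", instructions="Answer in one sentence.")

        result = await Runner.run(
            agent,
            input="What is the capital of France?",
            max_turns=5,
            run_config=RunConfig(workflow_name="Docs example", tracing_disabled=True),
        )
        print(result.final_output)

    asyncio.run(main())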
- if current_span is None: - handoff_names = [h.agent_name for h in cls._get_handoffs(current_agent)] - if output_schema := cls._get_output_schema(current_agent): - output_type_name = output_schema.output_type_name() - else: - output_type_name = "str" - - current_span = agent_span( - name=current_agent.name, - handoffs=handoff_names, - output_type=output_type_name, - ) - current_span.start(mark_as_current=True) - - all_tools = await cls._get_all_tools(current_agent) - current_span.span_data.tools = [t.name for t in all_tools] - - current_turn += 1 - if current_turn > max_turns: - _error_tracing.attach_error_to_span( - current_span, - SpanError( - message="Max turns exceeded", - data={"max_turns": max_turns}, - ), - ) - raise MaxTurnsExceeded(f"Max turns ({max_turns}) exceeded") - - logger.debug( - f"Running agent {current_agent.name} (turn {current_turn})", - ) - - if current_turn == 1: - input_guardrail_results, turn_result = await asyncio.gather( - cls._run_input_guardrails( - starting_agent, - starting_agent.input_guardrails - + (run_config.input_guardrails or []), - copy.deepcopy(input), - context_wrapper, - ), - cls._run_single_turn( - agent=current_agent, - all_tools=all_tools, - original_input=original_input, - generated_items=generated_items, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - should_run_agent_start_hooks=should_run_agent_start_hooks, - tool_use_tracker=tool_use_tracker, - ), - ) - else: - turn_result = await cls._run_single_turn( - agent=current_agent, - all_tools=all_tools, - original_input=original_input, - generated_items=generated_items, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - should_run_agent_start_hooks=should_run_agent_start_hooks, - tool_use_tracker=tool_use_tracker, - ) - should_run_agent_start_hooks = False - - model_responses.append(turn_result.model_response) - original_input = turn_result.original_input - generated_items = turn_result.generated_items - - if isinstance(turn_result.next_step, NextStepFinalOutput): - output_guardrail_results = await cls._run_output_guardrails( - current_agent.output_guardrails + (run_config.output_guardrails or []), - current_agent, - turn_result.next_step.output, - context_wrapper, - ) - return RunResult( - input=original_input, - new_items=generated_items, - raw_responses=model_responses, - final_output=turn_result.next_step.output, - _last_agent=current_agent, - input_guardrail_results=input_guardrail_results, - output_guardrail_results=output_guardrail_results, - ) - elif isinstance(turn_result.next_step, NextStepHandoff): - current_agent = cast(Agent[TContext], turn_result.next_step.new_agent) - current_span.finish(reset_current=True) - current_span = None - should_run_agent_start_hooks = True - elif isinstance(turn_result.next_step, NextStepRunAgain): - pass - else: - raise AgentsException( - f"Unknown next step type: {type(turn_result.next_step)}" - ) - finally: - if current_span: - current_span.finish(reset_current=True) - - @classmethod - def run_sync( - cls, - starting_agent: Agent[TContext], - input: str | list[TResponseInputItem], - *, - context: TContext | None = None, - max_turns: int = DEFAULT_MAX_TURNS, - hooks: RunHooks[TContext] | None = None, - run_config: RunConfig | None = None, - ) -> RunResult: - """Run a workflow synchronously, starting at the given agent. Note that this just wraps the - `run` method, so it will not work if there's already an event loop (e.g. 
inside an async - function, or in a Jupyter notebook or async context like FastAPI). For those cases, use - the `run` method instead. - - The agent will run in a loop until a final output is generated. The loop runs like so: - 1. The agent is invoked with the given input. - 2. If there is a final output (i.e. the agent produces something of type - `agent.output_type`, the loop terminates. - 3. If there's a handoff, we run the loop again, with the new agent. - 4. Else, we run tool calls (if any), and re-run the loop. - - In two cases, the agent may raise an exception: - 1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised. - 2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised. - - Note that only the first agent's input guardrails are run. - - Args: - starting_agent: The starting agent to run. - input: The initial input to the agent. You can pass a single string for a user message, - or a list of input items. - context: The context to run the agent with. - max_turns: The maximum number of turns to run the agent for. A turn is defined as one - AI invocation (including any tool calls that might occur). - hooks: An object that receives callbacks on various lifecycle events. - run_config: Global settings for the entire agent run. - - Returns: - A run result containing all the inputs, guardrail results and the output of the last - agent. Agents may perform handoffs, so we don't know the specific type of the output. - """ - return asyncio.get_event_loop().run_until_complete( - cls.run( - starting_agent, - input, - context=context, - max_turns=max_turns, - hooks=hooks, - run_config=run_config, - ) - ) - - @classmethod - def run_streamed( - cls, - starting_agent: Agent[TContext], - input: str | list[TResponseInputItem], - context: TContext | None = None, - max_turns: int = DEFAULT_MAX_TURNS, - hooks: RunHooks[TContext] | None = None, - run_config: RunConfig | None = None, - ) -> RunResultStreaming: - """Run a workflow starting at the given agent in streaming mode. The returned result object - contains a method you can use to stream semantic events as they are generated. - - The agent will run in a loop until a final output is generated. The loop runs like so: - 1. The agent is invoked with the given input. - 2. If there is a final output (i.e. the agent produces something of type - `agent.output_type`, the loop terminates. - 3. If there's a handoff, we run the loop again, with the new agent. - 4. Else, we run tool calls (if any), and re-run the loop. - - In two cases, the agent may raise an exception: - 1. If the max_turns is exceeded, a MaxTurnsExceeded exception is raised. - 2. If a guardrail tripwire is triggered, a GuardrailTripwireTriggered exception is raised. - - Note that only the first agent's input guardrails are run. - - Args: - starting_agent: The starting agent to run. - input: The initial input to the agent. You can pass a single string for a user message, - or a list of input items. - context: The context to run the agent with. - max_turns: The maximum number of turns to run the agent for. A turn is defined as one - AI invocation (including any tool calls that might occur). - hooks: An object that receives callbacks on various lifecycle events. - run_config: Global settings for the entire agent run. - - Returns: - A result object that contains data about the run, as well as a method to stream events. 
- """ - if hooks is None: - hooks = RunHooks[Any]() - if run_config is None: - run_config = RunConfig() - - # If there's already a trace, we don't create a new one. In addition, we can't end the - # trace here, because the actual work is done in `stream_events` and this method ends - # before that. - new_trace = ( - None - if get_current_trace() - else trace( - workflow_name=run_config.workflow_name, - trace_id=run_config.trace_id, - group_id=run_config.group_id, - metadata=run_config.trace_metadata, - disabled=run_config.tracing_disabled, - ) - ) - # Need to start the trace here, because the current trace contextvar is captured at - # asyncio.create_task time - if new_trace: - new_trace.start(mark_as_current=True) - - output_schema = cls._get_output_schema(starting_agent) - context_wrapper: RunContextWrapper[TContext] = RunContextWrapper( - context=context # type: ignore - ) - - streamed_result = RunResultStreaming( - input=copy.deepcopy(input), - new_items=[], - current_agent=starting_agent, - raw_responses=[], - final_output=None, - is_complete=False, - current_turn=0, - max_turns=max_turns, - input_guardrail_results=[], - output_guardrail_results=[], - _current_agent_output_schema=output_schema, - _trace=new_trace, - ) - - # Kick off the actual agent loop in the background and return the streamed result object. - streamed_result._run_impl_task = asyncio.create_task( - cls._run_streamed_impl( - starting_input=input, - streamed_result=streamed_result, - starting_agent=starting_agent, - max_turns=max_turns, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - ) - ) - return streamed_result - - @classmethod - async def _run_input_guardrails_with_queue( - cls, - agent: Agent[Any], - guardrails: list[InputGuardrail[TContext]], - input: str | list[TResponseInputItem], - context: RunContextWrapper[TContext], - streamed_result: RunResultStreaming, - parent_span: Span[Any], - ): - queue = streamed_result._input_guardrail_queue - - # We'll run the guardrails and push them onto the queue as they complete - guardrail_tasks = [ - asyncio.create_task( - RunImpl.run_single_input_guardrail(agent, guardrail, input, context) - ) - for guardrail in guardrails - ] - guardrail_results = [] - try: - for done in asyncio.as_completed(guardrail_tasks): - result = await done - if result.output.tripwire_triggered: - _error_tracing.attach_error_to_span( - parent_span, - SpanError( - message="Guardrail tripwire triggered", - data={ - "guardrail": result.guardrail.get_name(), - "type": "input_guardrail", - }, - ), - ) - queue.put_nowait(result) - guardrail_results.append(result) - except Exception: - for t in guardrail_tasks: - t.cancel() - raise - - streamed_result.input_guardrail_results = guardrail_results - - @classmethod - async def _run_streamed_impl( - cls, - starting_input: str | list[TResponseInputItem], - streamed_result: RunResultStreaming, - starting_agent: Agent[TContext], - max_turns: int, - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - ): - current_span: Span[AgentSpanData] | None = None - current_agent = starting_agent - current_turn = 0 - should_run_agent_start_hooks = True - tool_use_tracker = AgentToolUseTracker() - - streamed_result._event_queue.put_nowait(AgentUpdatedStreamEvent(new_agent=current_agent)) - - try: - while True: - if streamed_result.is_complete: - break - - # Start an agent span if we don't have one. This span is ended if the current - # agent changes, or if the agent loop ends. 
- if current_span is None: - handoff_names = [h.agent_name for h in cls._get_handoffs(current_agent)] - if output_schema := cls._get_output_schema(current_agent): - output_type_name = output_schema.output_type_name() - else: - output_type_name = "str" - - current_span = agent_span( - name=current_agent.name, - handoffs=handoff_names, - output_type=output_type_name, - ) - current_span.start(mark_as_current=True) - - all_tools = await cls._get_all_tools(current_agent) - tool_names = [t.name for t in all_tools] - current_span.span_data.tools = tool_names - current_turn += 1 - streamed_result.current_turn = current_turn - - if current_turn > max_turns: - _error_tracing.attach_error_to_span( - current_span, - SpanError( - message="Max turns exceeded", - data={"max_turns": max_turns}, - ), - ) - streamed_result._event_queue.put_nowait(QueueCompleteSentinel()) - break - - if current_turn == 1: - # Run the input guardrails in the background and put the results on the queue - streamed_result._input_guardrails_task = asyncio.create_task( - cls._run_input_guardrails_with_queue( - starting_agent, - starting_agent.input_guardrails + (run_config.input_guardrails or []), - copy.deepcopy(ItemHelpers.input_to_new_input_list(starting_input)), - context_wrapper, - streamed_result, - current_span, - ) - ) - try: - turn_result = await cls._run_single_turn_streamed( - streamed_result, - current_agent, - hooks, - context_wrapper, - run_config, - should_run_agent_start_hooks, - tool_use_tracker, - all_tools, - ) - should_run_agent_start_hooks = False - - streamed_result.raw_responses = streamed_result.raw_responses + [ - turn_result.model_response - ] - streamed_result.input = turn_result.original_input - streamed_result.new_items = turn_result.generated_items - - if isinstance(turn_result.next_step, NextStepHandoff): - current_agent = turn_result.next_step.new_agent - current_span.finish(reset_current=True) - current_span = None - should_run_agent_start_hooks = True - streamed_result._event_queue.put_nowait( - AgentUpdatedStreamEvent(new_agent=current_agent) - ) - elif isinstance(turn_result.next_step, NextStepFinalOutput): - streamed_result._output_guardrails_task = asyncio.create_task( - cls._run_output_guardrails( - current_agent.output_guardrails - + (run_config.output_guardrails or []), - current_agent, - turn_result.next_step.output, - context_wrapper, - ) - ) - - try: - output_guardrail_results = await streamed_result._output_guardrails_task - except Exception: - # Exceptions will be checked in the stream_events loop - output_guardrail_results = [] - - streamed_result.output_guardrail_results = output_guardrail_results - streamed_result.final_output = turn_result.next_step.output - streamed_result.is_complete = True - streamed_result._event_queue.put_nowait(QueueCompleteSentinel()) - elif isinstance(turn_result.next_step, NextStepRunAgain): - pass - except Exception as e: - if current_span: - _error_tracing.attach_error_to_span( - current_span, - SpanError( - message="Error in agent run", - data={"error": str(e)}, - ), - ) - streamed_result.is_complete = True - streamed_result._event_queue.put_nowait(QueueCompleteSentinel()) - raise - - streamed_result.is_complete = True - finally: - if current_span: - current_span.finish(reset_current=True) - - @classmethod - async def _run_single_turn_streamed( - cls, - streamed_result: RunResultStreaming, - agent: Agent[TContext], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - should_run_agent_start_hooks: bool, 
- tool_use_tracker: AgentToolUseTracker, - all_tools: list[Tool], - ) -> SingleStepResult: - if should_run_agent_start_hooks: - await asyncio.gather( - hooks.on_agent_start(context_wrapper, agent), - ( - agent.hooks.on_start(context_wrapper, agent) - if agent.hooks - else _coro.noop_coroutine() - ), - ) - - output_schema = cls._get_output_schema(agent) - - streamed_result.current_agent = agent - streamed_result._current_agent_output_schema = output_schema - - system_prompt = await agent.get_system_prompt(context_wrapper) - - handoffs = cls._get_handoffs(agent) - model = cls._get_model(agent, run_config) - model_settings = agent.model_settings.resolve(run_config.model_settings) - model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings) - - final_response: ModelResponse | None = None - - input = ItemHelpers.input_to_new_input_list(streamed_result.input) - input.extend([item.to_input_item() for item in streamed_result.new_items]) - - # 1. Stream the output events - async for event in model.stream_response( - system_prompt, - input, - model_settings, - all_tools, - output_schema, - handoffs, - get_model_tracing_impl( - run_config.tracing_disabled, run_config.trace_include_sensitive_data - ), - ): - if isinstance(event, ResponseCompletedEvent): - usage = ( - Usage( - requests=1, - input_tokens=event.response.usage.input_tokens, - output_tokens=event.response.usage.output_tokens, - total_tokens=event.response.usage.total_tokens, - ) - if event.response.usage - else Usage() - ) - final_response = ModelResponse( - output=event.response.output, - usage=usage, - referenceable_id=event.response.id, - ) - - streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event)) - - # 2. At this point, the streaming is complete for this turn of the agent loop. - if not final_response: - raise ModelBehaviorError("Model did not produce a final response!") - - # 3. 
Now, we can process the turn as we do in the non-streaming case - single_step_result = await cls._get_single_step_result_from_response( - agent=agent, - original_input=streamed_result.input, - pre_step_items=streamed_result.new_items, - new_response=final_response, - output_schema=output_schema, - all_tools=all_tools, - handoffs=handoffs, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - tool_use_tracker=tool_use_tracker, - ) - - RunImpl.stream_step_result_to_queue(single_step_result, streamed_result._event_queue) - return single_step_result - - @classmethod - async def _run_single_turn( - cls, - *, - agent: Agent[TContext], - all_tools: list[Tool], - original_input: str | list[TResponseInputItem], - generated_items: list[RunItem], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - should_run_agent_start_hooks: bool, - tool_use_tracker: AgentToolUseTracker, - ) -> SingleStepResult: - # Ensure we run the hooks before anything else - if should_run_agent_start_hooks: - await asyncio.gather( - hooks.on_agent_start(context_wrapper, agent), - ( - agent.hooks.on_start(context_wrapper, agent) - if agent.hooks - else _coro.noop_coroutine() - ), - ) - - system_prompt = await agent.get_system_prompt(context_wrapper) - - output_schema = cls._get_output_schema(agent) - handoffs = cls._get_handoffs(agent) - input = ItemHelpers.input_to_new_input_list(original_input) - input.extend([generated_item.to_input_item() for generated_item in generated_items]) - - new_response = await cls._get_new_response( - agent, - system_prompt, - input, - output_schema, - all_tools, - handoffs, - context_wrapper, - run_config, - tool_use_tracker, - ) - - return await cls._get_single_step_result_from_response( - agent=agent, - original_input=original_input, - pre_step_items=generated_items, - new_response=new_response, - output_schema=output_schema, - all_tools=all_tools, - handoffs=handoffs, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - tool_use_tracker=tool_use_tracker, - ) - - @classmethod - async def _get_single_step_result_from_response( - cls, - *, - agent: Agent[TContext], - all_tools: list[Tool], - original_input: str | list[TResponseInputItem], - pre_step_items: list[RunItem], - new_response: ModelResponse, - output_schema: AgentOutputSchema | None, - handoffs: list[Handoff], - hooks: RunHooks[TContext], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - tool_use_tracker: AgentToolUseTracker, - ) -> SingleStepResult: - processed_response = RunImpl.process_model_response( - agent=agent, - all_tools=all_tools, - response=new_response, - output_schema=output_schema, - handoffs=handoffs, - ) - - tool_use_tracker.add_tool_use(agent, processed_response.tools_used) - - return await RunImpl.execute_tools_and_side_effects( - agent=agent, - original_input=original_input, - pre_step_items=pre_step_items, - new_response=new_response, - processed_response=processed_response, - output_schema=output_schema, - hooks=hooks, - context_wrapper=context_wrapper, - run_config=run_config, - ) - - @classmethod - async def _run_input_guardrails( - cls, - agent: Agent[Any], - guardrails: list[InputGuardrail[TContext]], - input: str | list[TResponseInputItem], - context: RunContextWrapper[TContext], - ) -> list[InputGuardrailResult]: - if not guardrails: - return [] - - guardrail_tasks = [ - asyncio.create_task( - RunImpl.run_single_input_guardrail(agent, guardrail, input, context) - ) - for guardrail in 
guardrails - ] - - guardrail_results = [] - - for done in asyncio.as_completed(guardrail_tasks): - result = await done - if result.output.tripwire_triggered: - # Cancel all guardrail tasks if a tripwire is triggered. - for t in guardrail_tasks: - t.cancel() - _error_tracing.attach_error_to_current_span( - SpanError( - message="Guardrail tripwire triggered", - data={"guardrail": result.guardrail.get_name()}, - ) - ) - raise InputGuardrailTripwireTriggered(result) - else: - guardrail_results.append(result) - - return guardrail_results - - @classmethod - async def _run_output_guardrails( - cls, - guardrails: list[OutputGuardrail[TContext]], - agent: Agent[TContext], - agent_output: Any, - context: RunContextWrapper[TContext], - ) -> list[OutputGuardrailResult]: - if not guardrails: - return [] - - guardrail_tasks = [ - asyncio.create_task( - RunImpl.run_single_output_guardrail(guardrail, agent, agent_output, context) - ) - for guardrail in guardrails - ] - - guardrail_results = [] - - for done in asyncio.as_completed(guardrail_tasks): - result = await done - if result.output.tripwire_triggered: - # Cancel all guardrail tasks if a tripwire is triggered. - for t in guardrail_tasks: - t.cancel() - _error_tracing.attach_error_to_current_span( - SpanError( - message="Guardrail tripwire triggered", - data={"guardrail": result.guardrail.get_name()}, - ) - ) - raise OutputGuardrailTripwireTriggered(result) - else: - guardrail_results.append(result) - - return guardrail_results - - @classmethod - async def _get_new_response( - cls, - agent: Agent[TContext], - system_prompt: str | None, - input: list[TResponseInputItem], - output_schema: AgentOutputSchema | None, - all_tools: list[Tool], - handoffs: list[Handoff], - context_wrapper: RunContextWrapper[TContext], - run_config: RunConfig, - tool_use_tracker: AgentToolUseTracker, - ) -> ModelResponse: - model = cls._get_model(agent, run_config) - model_settings = agent.model_settings.resolve(run_config.model_settings) - model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings) - - new_response = await model.get_response( - system_instructions=system_prompt, - input=input, - model_settings=model_settings, - tools=all_tools, - output_schema=output_schema, - handoffs=handoffs, - tracing=get_model_tracing_impl( - run_config.tracing_disabled, run_config.trace_include_sensitive_data - ), - ) - - context_wrapper.usage.add(new_response.usage) - - return new_response - - @classmethod - def _get_output_schema(cls, agent: Agent[Any]) -> AgentOutputSchema | None: - if agent.output_type is None or agent.output_type is str: - return None - - return AgentOutputSchema(agent.output_type) - - @classmethod - def _get_handoffs(cls, agent: Agent[Any]) -> list[Handoff]: - handoffs = [] - for handoff_item in agent.handoffs: - if isinstance(handoff_item, Handoff): - handoffs.append(handoff_item) - elif isinstance(handoff_item, Agent): - handoffs.append(handoff(handoff_item)) - return handoffs - - @classmethod - async def _get_all_tools(cls, agent: Agent[Any]) -> list[Tool]: - return await agent.get_all_tools() - - @classmethod - def _get_model(cls, agent: Agent[Any], run_config: RunConfig) -> Model: - if isinstance(run_config.model, Model): - return run_config.model - elif isinstance(run_config.model, str): - return run_config.model_provider.get_model(run_config.model) - elif isinstance(agent.model, Model): - return agent.model - - return run_config.model_provider.get_model(agent.model) diff --git a/src/agents/run_context.py 
b/src/agents/run_context.py deleted file mode 100644 index 579a215f..00000000 --- a/src/agents/run_context.py +++ /dev/null @@ -1,26 +0,0 @@ -from dataclasses import dataclass, field -from typing import Any, Generic - -from typing_extensions import TypeVar - -from .usage import Usage - -TContext = TypeVar("TContext", default=Any) - - -@dataclass -class RunContextWrapper(Generic[TContext]): - """This wraps the context object that you passed to `Runner.run()`. It also contains - information about the usage of the agent run so far. - - NOTE: Contexts are not passed to the LLM. They're a way to pass dependencies and data to code - you implement, like tool functions, callbacks, hooks, etc. - """ - - context: TContext - """The context object (or None), passed by you to `Runner.run()`""" - - usage: Usage = field(default_factory=Usage) - """The usage of the agent run so far. For streamed responses, the usage will be stale until the - last chunk of the stream is processed. - """ diff --git a/src/agents/stream_events.py b/src/agents/stream_events.py deleted file mode 100644 index bd37d11f..00000000 --- a/src/agents/stream_events.py +++ /dev/null @@ -1,58 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from typing import Any, Literal, Union - -from typing_extensions import TypeAlias - -from .agent import Agent -from .items import RunItem, TResponseStreamEvent - - -@dataclass -class RawResponsesStreamEvent: - """Streaming event from the LLM. These are 'raw' events, i.e. they are directly passed through - from the LLM. - """ - - data: TResponseStreamEvent - """The raw responses streaming event from the LLM.""" - - type: Literal["raw_response_event"] = "raw_response_event" - """The type of the event.""" - - -@dataclass -class RunItemStreamEvent: - """Streaming events that wrap a `RunItem`. As the agent processes the LLM response, it will - generate these events for new messages, tool calls, tool outputs, handoffs, etc. - """ - - name: Literal[ - "message_output_created", - "handoff_requested", - "handoff_occured", - "tool_called", - "tool_output", - "reasoning_item_created", - ] - """The name of the event.""" - - item: RunItem - """The item that was created.""" - - type: Literal["run_item_stream_event"] = "run_item_stream_event" - - -@dataclass -class AgentUpdatedStreamEvent: - """Event that notifies that there is a new agent running.""" - - new_agent: Agent[Any] - """The new agent.""" - - type: Literal["agent_updated_stream_event"] = "agent_updated_stream_event" - - -StreamEvent: TypeAlias = Union[RawResponsesStreamEvent, RunItemStreamEvent, AgentUpdatedStreamEvent] -"""A streaming event from an agent.""" diff --git a/src/agents/strict_schema.py b/src/agents/strict_schema.py deleted file mode 100644 index 3f37660a..00000000 --- a/src/agents/strict_schema.py +++ /dev/null @@ -1,167 +0,0 @@ -from __future__ import annotations - -from typing import Any - -from openai import NOT_GIVEN -from typing_extensions import TypeGuard - -from .exceptions import UserError - -_EMPTY_SCHEMA = { - "additionalProperties": False, - "type": "object", - "properties": {}, - "required": [], -} - - -def ensure_strict_json_schema( - schema: dict[str, Any], -) -> dict[str, Any]: - """Mutates the given JSON schema to ensure it conforms to the `strict` standard - that the OpenAI API expects. 
- """ - if schema == {}: - return _EMPTY_SCHEMA - return _ensure_strict_json_schema(schema, path=(), root=schema) - - -# Adapted from https://github.com/openai/openai-python/blob/main/src/openai/lib/_pydantic.py -def _ensure_strict_json_schema( - json_schema: object, - *, - path: tuple[str, ...], - root: dict[str, object], -) -> dict[str, Any]: - if not is_dict(json_schema): - raise TypeError(f"Expected {json_schema} to be a dictionary; path={path}") - - defs = json_schema.get("$defs") - if is_dict(defs): - for def_name, def_schema in defs.items(): - _ensure_strict_json_schema(def_schema, path=(*path, "$defs", def_name), root=root) - - definitions = json_schema.get("definitions") - if is_dict(definitions): - for definition_name, definition_schema in definitions.items(): - _ensure_strict_json_schema( - definition_schema, path=(*path, "definitions", definition_name), root=root - ) - - typ = json_schema.get("type") - if typ == "object" and "additionalProperties" not in json_schema: - json_schema["additionalProperties"] = False - elif ( - typ == "object" - and "additionalProperties" in json_schema - and json_schema["additionalProperties"] - ): - raise UserError( - "additionalProperties should not be set for object types. This could be because " - "you're using an older version of Pydantic, or because you configured additional " - "properties to be allowed. If you really need this, update the function or output tool " - "to not use a strict schema." - ) - - # object types - # { 'type': 'object', 'properties': { 'a': {...} } } - properties = json_schema.get("properties") - if is_dict(properties): - json_schema["required"] = list(properties.keys()) - json_schema["properties"] = { - key: _ensure_strict_json_schema(prop_schema, path=(*path, "properties", key), root=root) - for key, prop_schema in properties.items() - } - - # arrays - # { 'type': 'array', 'items': {...} } - items = json_schema.get("items") - if is_dict(items): - json_schema["items"] = _ensure_strict_json_schema(items, path=(*path, "items"), root=root) - - # unions - any_of = json_schema.get("anyOf") - if is_list(any_of): - json_schema["anyOf"] = [ - _ensure_strict_json_schema(variant, path=(*path, "anyOf", str(i)), root=root) - for i, variant in enumerate(any_of) - ] - - # intersections - all_of = json_schema.get("allOf") - if is_list(all_of): - if len(all_of) == 1: - json_schema.update( - _ensure_strict_json_schema(all_of[0], path=(*path, "allOf", "0"), root=root) - ) - json_schema.pop("allOf") - else: - json_schema["allOf"] = [ - _ensure_strict_json_schema(entry, path=(*path, "allOf", str(i)), root=root) - for i, entry in enumerate(all_of) - ] - - # strip `None` defaults as there's no meaningful distinction here - # the schema will still be `nullable` and the model will default - # to using `None` anyway - if json_schema.get("default", NOT_GIVEN) is None: - json_schema.pop("default") - - # we can't use `$ref`s if there are also other properties defined, e.g. 
- # `{"$ref": "...", "description": "my description"}` - # - # so we unravel the ref - # `{"type": "string", "description": "my description"}` - ref = json_schema.get("$ref") - if ref and has_more_than_n_keys(json_schema, 1): - assert isinstance(ref, str), f"Received non-string $ref - {ref}" - - resolved = resolve_ref(root=root, ref=ref) - if not is_dict(resolved): - raise ValueError( - f"Expected `$ref: {ref}` to resolved to a dictionary but got {resolved}" - ) - - # properties from the json schema take priority over the ones on the `$ref` - json_schema.update({**resolved, **json_schema}) - json_schema.pop("$ref") - # Since the schema expanded from `$ref` might not have `additionalProperties: false` applied - # we call `_ensure_strict_json_schema` again to fix the inlined schema and ensure it's valid - return _ensure_strict_json_schema(json_schema, path=path, root=root) - - return json_schema - - -def resolve_ref(*, root: dict[str, object], ref: str) -> object: - if not ref.startswith("#/"): - raise ValueError(f"Unexpected $ref format {ref!r}; Does not start with #/") - - path = ref[2:].split("/") - resolved = root - for key in path: - value = resolved[key] - assert is_dict(value), ( - f"encountered non-dictionary entry while resolving {ref} - {resolved}" - ) - resolved = value - - return resolved - - -def is_dict(obj: object) -> TypeGuard[dict[str, object]]: - # just pretend that we know there are only `str` keys - # as that check is not worth the performance cost - return isinstance(obj, dict) - - -def is_list(obj: object) -> TypeGuard[list[object]]: - return isinstance(obj, list) - - -def has_more_than_n_keys(obj: dict[str, object], n: int) -> bool: - i = 0 - for _ in obj.keys(): - i += 1 - if i > n: - return True - return False diff --git a/src/agents/tool.py b/src/agents/tool.py deleted file mode 100644 index c1c16242..00000000 --- a/src/agents/tool.py +++ /dev/null @@ -1,310 +0,0 @@ -from __future__ import annotations - -import inspect -import json -from collections.abc import Awaitable -from dataclasses import dataclass -from typing import Any, Callable, Literal, Union, overload - -from openai.types.responses.file_search_tool_param import Filters, RankingOptions -from openai.types.responses.web_search_tool_param import UserLocation -from pydantic import ValidationError -from typing_extensions import Concatenate, ParamSpec - -from . import _debug -from .computer import AsyncComputer, Computer -from .exceptions import ModelBehaviorError -from .function_schema import DocstringStyle, function_schema -from .items import RunItem -from .logger import logger -from .run_context import RunContextWrapper -from .tracing import SpanError -from .util import _error_tracing -from .util._types import MaybeAwaitable - -ToolParams = ParamSpec("ToolParams") - -ToolFunctionWithoutContext = Callable[ToolParams, Any] -ToolFunctionWithContext = Callable[Concatenate[RunContextWrapper[Any], ToolParams], Any] - -ToolFunction = Union[ToolFunctionWithoutContext[ToolParams], ToolFunctionWithContext[ToolParams]] - - -@dataclass -class FunctionToolResult: - tool: FunctionTool - """The tool that was run.""" - - output: Any - """The output of the tool.""" - - run_item: RunItem - """The run item that was produced as a result of the tool call.""" - - -@dataclass -class FunctionTool: - """A tool that wraps a function. In most cases, you should use the `function_tool` helpers to - create a FunctionTool, as they let you easily wrap a Python function. - """ - - name: str - """The name of the tool, as shown to the LLM. 
Generally the name of the function.""" - - description: str - """A description of the tool, as shown to the LLM.""" - - params_json_schema: dict[str, Any] - """The JSON schema for the tool's parameters.""" - - on_invoke_tool: Callable[[RunContextWrapper[Any], str], Awaitable[Any]] - """A function that invokes the tool with the given context and parameters. The params passed - are: - 1. The tool run context. - 2. The arguments from the LLM, as a JSON string. - - You must return a string representation of the tool output, or something we can call `str()` on. - In case of errors, you can either raise an Exception (which will cause the run to fail) or - return a string error message (which will be sent back to the LLM). - """ - - strict_json_schema: bool = True - """Whether the JSON schema is in strict mode. We **strongly** recommend setting this to True, - as it increases the likelihood of correct JSON input.""" - - -@dataclass -class FileSearchTool: - """A hosted tool that lets the LLM search through a vector store. Currently only supported with - OpenAI models, using the Responses API. - """ - - vector_store_ids: list[str] - """The IDs of the vector stores to search.""" - - max_num_results: int | None = None - """The maximum number of results to return.""" - - include_search_results: bool = False - """Whether to include the search results in the output produced by the LLM.""" - - ranking_options: RankingOptions | None = None - """Ranking options for search.""" - - filters: Filters | None = None - """A filter to apply based on file attributes.""" - - @property - def name(self): - return "file_search" - - -@dataclass -class WebSearchTool: - """A hosted tool that lets the LLM search the web. Currently only supported with OpenAI models, - using the Responses API. - """ - - user_location: UserLocation | None = None - """Optional location for the search. Lets you customize results to be relevant to a location.""" - - search_context_size: Literal["low", "medium", "high"] = "medium" - """The amount of context to use for the search.""" - - @property - def name(self): - return "web_search_preview" - - -@dataclass -class ComputerTool: - """A hosted tool that lets the LLM control a computer.""" - - computer: Computer | AsyncComputer - """The computer implementation, which describes the environment and dimensions of the computer, - as well as implements the computer actions like click, screenshot, etc. - """ - - @property - def name(self): - return "computer_use_preview" - - -Tool = Union[FunctionTool, FileSearchTool, WebSearchTool, ComputerTool] -"""A tool that can be used in an agent.""" - - -def default_tool_error_function(ctx: RunContextWrapper[Any], error: Exception) -> str: - """The default tool error function, which just returns a generic error message.""" - return f"An error occurred while running the tool. Please try again. Error: {str(error)}" - - -ToolErrorFunction = Callable[[RunContextWrapper[Any], Exception], MaybeAwaitable[str]] - - -@overload -def function_tool( - func: ToolFunction[...], - *, - name_override: str | None = None, - description_override: str | None = None, - docstring_style: DocstringStyle | None = None, - use_docstring_info: bool = True, - failure_error_function: ToolErrorFunction | None = None, - strict_mode: bool = True, -) -> FunctionTool: - """Overload for usage as @function_tool (no parentheses).""" - ... 
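A hedged sketch of the bare-decorator usage named just above; the weather text is made up, and `name`, `description`, and `params_json_schema` are the `FunctionTool` fields documented earlier in this file:

    from agents.tool import function_tool

    @function_tool
    def get_weather(city: str) -> str:
        """Return a short weather summary for `city`.

        Args:
            city: Name of the city to look up.
        """
        return f"It is always sunny in {city}."

    # The decorator returns a FunctionTool, not the original function.
    print(get_weather.name)                # "get_weather"
    print(get_weather.description)         # taken from the docstring
    print(get_weather.params_json_schema)  # JSON schema derived from the signature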
- - -@overload -def function_tool( - *, - name_override: str | None = None, - description_override: str | None = None, - docstring_style: DocstringStyle | None = None, - use_docstring_info: bool = True, - failure_error_function: ToolErrorFunction | None = None, - strict_mode: bool = True, -) -> Callable[[ToolFunction[...]], FunctionTool]: - """Overload for usage as @function_tool(...).""" - ... - - -def function_tool( - func: ToolFunction[...] | None = None, - *, - name_override: str | None = None, - description_override: str | None = None, - docstring_style: DocstringStyle | None = None, - use_docstring_info: bool = True, - failure_error_function: ToolErrorFunction | None = default_tool_error_function, - strict_mode: bool = True, -) -> FunctionTool | Callable[[ToolFunction[...]], FunctionTool]: - """ - Decorator to create a FunctionTool from a function. By default, we will: - 1. Parse the function signature to create a JSON schema for the tool's parameters. - 2. Use the function's docstring to populate the tool's description. - 3. Use the function's docstring to populate argument descriptions. - The docstring style is detected automatically, but you can override it. - - If the function takes a `RunContextWrapper` as the first argument, it *must* match the - context type of the agent that uses the tool. - - Args: - func: The function to wrap. - name_override: If provided, use this name for the tool instead of the function's name. - description_override: If provided, use this description for the tool instead of the - function's docstring. - docstring_style: If provided, use this style for the tool's docstring. If not provided, - we will attempt to auto-detect the style. - use_docstring_info: If True, use the function's docstring to populate the tool's - description and argument descriptions. - failure_error_function: If provided, use this function to generate an error message when - the tool call fails. The error message is sent to the LLM. If you pass None, then no - error message will be sent and instead an Exception will be raised. - strict_mode: Whether to enable strict mode for the tool's JSON schema. We *strongly* - recommend setting this to True, as it increases the likelihood of correct JSON input. - If False, it allows non-strict JSON schemas. For example, if a parameter has a default - value, it will be optional, additional properties are allowed, etc. 
See here for more: - https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#supported-schemas - """ - - def _create_function_tool(the_func: ToolFunction[...]) -> FunctionTool: - schema = function_schema( - func=the_func, - name_override=name_override, - description_override=description_override, - docstring_style=docstring_style, - use_docstring_info=use_docstring_info, - strict_json_schema=strict_mode, - ) - - async def _on_invoke_tool_impl(ctx: RunContextWrapper[Any], input: str) -> Any: - try: - json_data: dict[str, Any] = json.loads(input) if input else {} - except Exception as e: - if _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"Invalid JSON input for tool {schema.name}") - else: - logger.debug(f"Invalid JSON input for tool {schema.name}: {input}") - raise ModelBehaviorError( - f"Invalid JSON input for tool {schema.name}: {input}" - ) from e - - if _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"Invoking tool {schema.name}") - else: - logger.debug(f"Invoking tool {schema.name} with input {input}") - - try: - parsed = ( - schema.params_pydantic_model(**json_data) - if json_data - else schema.params_pydantic_model() - ) - except ValidationError as e: - raise ModelBehaviorError(f"Invalid JSON input for tool {schema.name}: {e}") from e - - args, kwargs_dict = schema.to_call_args(parsed) - - if not _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"Tool call args: {args}, kwargs: {kwargs_dict}") - - if inspect.iscoroutinefunction(the_func): - if schema.takes_context: - result = await the_func(ctx, *args, **kwargs_dict) - else: - result = await the_func(*args, **kwargs_dict) - else: - if schema.takes_context: - result = the_func(ctx, *args, **kwargs_dict) - else: - result = the_func(*args, **kwargs_dict) - - if _debug.DONT_LOG_TOOL_DATA: - logger.debug(f"Tool {schema.name} completed.") - else: - logger.debug(f"Tool {schema.name} returned {result}") - - return result - - async def _on_invoke_tool(ctx: RunContextWrapper[Any], input: str) -> Any: - try: - return await _on_invoke_tool_impl(ctx, input) - except Exception as e: - if failure_error_function is None: - raise - - result = failure_error_function(ctx, e) - if inspect.isawaitable(result): - return await result - - _error_tracing.attach_error_to_current_span( - SpanError( - message="Error running tool (non-fatal)", - data={ - "tool_name": schema.name, - "error": str(e), - }, - ) - ) - return result - - return FunctionTool( - name=schema.name, - description=schema.description or "", - params_json_schema=schema.params_json_schema, - on_invoke_tool=_on_invoke_tool, - strict_json_schema=strict_mode, - ) - - # If func is actually a callable, we were used as @function_tool with no parentheses - if callable(func): - return _create_function_tool(func) - - # Otherwise, we were used as @function_tool(...), so return a decorator - def decorator(real_func: ToolFunction[...]) -> FunctionTool: - return _create_function_tool(real_func) - - return decorator diff --git a/src/agents/tracing/__init__.py b/src/agents/tracing/__init__.py deleted file mode 100644 index 9df94426..00000000 --- a/src/agents/tracing/__init__.py +++ /dev/null @@ -1,113 +0,0 @@ -import atexit - -from .create import ( - agent_span, - custom_span, - function_span, - generation_span, - get_current_span, - get_current_trace, - guardrail_span, - handoff_span, - mcp_tools_span, - response_span, - speech_group_span, - speech_span, - trace, - transcription_span, -) -from .processor_interface import TracingProcessor -from .processors import default_exporter, default_processor 
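A short sketch of the two decorator forms just defined (bare `@function_tool` and `@function_tool(...)`), plus a custom `failure_error_function`. The `agents` import path is assumed; the tool bodies are placeholders.

from typing import Any

from agents.run_context import RunContextWrapper
from agents.tool import function_tool


@function_tool
def get_time(timezone: str) -> str:
    """Return the current time in the given IANA timezone."""
    return f"12:00 in {timezone}"  # placeholder body


def _friendly_error(ctx: RunContextWrapper[Any], error: Exception) -> str:
    # Returned to the LLM instead of raising, per failure_error_function above.
    return f"Could not fetch weather: {error}"


@function_tool(name_override="fetch_weather", failure_error_function=_friendly_error)
def get_weather(city: str) -> str:
    """Look up the weather for a city."""
    raise RuntimeError("weather service unreachable")  # forces the error path


# Both decorators return FunctionTool instances whose name, description, and
# parameter schema were derived from the signature and docstring.
print(get_time.name, get_weather.name)  # -> get_time fetch_weather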
-from .setup import GLOBAL_TRACE_PROVIDER -from .span_data import ( - AgentSpanData, - CustomSpanData, - FunctionSpanData, - GenerationSpanData, - GuardrailSpanData, - HandoffSpanData, - MCPListToolsSpanData, - ResponseSpanData, - SpanData, - SpeechGroupSpanData, - SpeechSpanData, - TranscriptionSpanData, -) -from .spans import Span, SpanError -from .traces import Trace -from .util import gen_span_id, gen_trace_id - -__all__ = [ - "add_trace_processor", - "agent_span", - "custom_span", - "function_span", - "generation_span", - "get_current_span", - "get_current_trace", - "guardrail_span", - "handoff_span", - "response_span", - "set_trace_processors", - "set_tracing_disabled", - "trace", - "Trace", - "SpanError", - "Span", - "SpanData", - "AgentSpanData", - "CustomSpanData", - "FunctionSpanData", - "GenerationSpanData", - "GuardrailSpanData", - "HandoffSpanData", - "MCPListToolsSpanData", - "ResponseSpanData", - "SpeechGroupSpanData", - "SpeechSpanData", - "TranscriptionSpanData", - "TracingProcessor", - "gen_trace_id", - "gen_span_id", - "speech_group_span", - "speech_span", - "transcription_span", - "mcp_tools_span", -] - - -def add_trace_processor(span_processor: TracingProcessor) -> None: - """ - Adds a new trace processor. This processor will receive all traces/spans. - """ - GLOBAL_TRACE_PROVIDER.register_processor(span_processor) - - -def set_trace_processors(processors: list[TracingProcessor]) -> None: - """ - Set the list of trace processors. This will replace the current list of processors. - """ - GLOBAL_TRACE_PROVIDER.set_processors(processors) - - -def set_tracing_disabled(disabled: bool) -> None: - """ - Set whether tracing is globally disabled. - """ - GLOBAL_TRACE_PROVIDER.set_disabled(disabled) - - -def set_tracing_export_api_key(api_key: str) -> None: - """ - Set the OpenAI API key for the backend exporter. - """ - default_exporter().set_api_key(api_key) - - -# Add the default processor, which exports traces and spans to the backend in batches. You can -# change the default behavior by either: -# 1. calling add_trace_processor(), which adds additional processors, or -# 2. calling set_trace_processors(), which replaces the default processor. -add_trace_processor(default_processor()) - -atexit.register(GLOBAL_TRACE_PROVIDER.shutdown) diff --git a/src/agents/tracing/create.py b/src/agents/tracing/create.py deleted file mode 100644 index b6fe4610..00000000 --- a/src/agents/tracing/create.py +++ /dev/null @@ -1,455 +0,0 @@ -from __future__ import annotations - -from collections.abc import Mapping, Sequence -from typing import TYPE_CHECKING, Any - -from ..logger import logger -from .setup import GLOBAL_TRACE_PROVIDER -from .span_data import ( - AgentSpanData, - CustomSpanData, - FunctionSpanData, - GenerationSpanData, - GuardrailSpanData, - HandoffSpanData, - MCPListToolsSpanData, - ResponseSpanData, - SpeechGroupSpanData, - SpeechSpanData, - TranscriptionSpanData, -) -from .spans import Span -from .traces import Trace - -if TYPE_CHECKING: - from openai.types.responses import Response - - -def trace( - workflow_name: str, - trace_id: str | None = None, - group_id: str | None = None, - metadata: dict[str, Any] | None = None, - disabled: bool = False, -) -> Trace: - """ - Create a new trace. The trace will not be started automatically; you should either use - it as a context manager (`with trace(...):`) or call `trace.start()` + `trace.finish()` - manually. 
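A sketch of the module-level helpers exported from this `__init__.py`: registering an extra processor and toggling tracing globally. The processor here just counts traces; `agents` import paths are assumed, and all abstract methods of TracingProcessor (defined later in this diff) must be implemented.

from typing import Any

from agents.tracing import Span, Trace, TracingProcessor, add_trace_processor, set_tracing_disabled


class CountingProcessor(TracingProcessor):
    def __init__(self) -> None:
        self.traces = 0

    def on_trace_start(self, trace: Trace) -> None:
        self.traces += 1

    def on_trace_end(self, trace: Trace) -> None:
        pass

    def on_span_start(self, span: Span[Any]) -> None:
        pass

    def on_span_end(self, span: Span[Any]) -> None:
        pass

    def shutdown(self) -> None:
        pass

    def force_flush(self) -> None:
        pass


add_trace_processor(CountingProcessor())  # runs alongside the default batch exporter
set_tracing_disabled(False)               # tracing can also be switched off globally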
- - In addition to the workflow name and optional grouping identifier, you can provide - an arbitrary metadata dictionary to attach additional user-defined information to - the trace. - - Args: - workflow_name: The name of the logical app or workflow. For example, you might provide - "code_bot" for a coding agent, or "customer_support_agent" for a customer support agent. - trace_id: The ID of the trace. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_trace_id()` to generate a trace ID, to guarantee that IDs are - correctly formatted. - group_id: Optional grouping identifier to link multiple traces from the same conversation - or process. For instance, you might use a chat thread ID. - metadata: Optional dictionary of additional metadata to attach to the trace. - disabled: If True, we will return a Trace but the Trace will not be recorded. This will - not be checked if there's an existing trace and `even_if_trace_running` is True. - - Returns: - The newly created trace object. - """ - current_trace = GLOBAL_TRACE_PROVIDER.get_current_trace() - if current_trace: - logger.warning( - "Trace already exists. Creating a new trace, but this is probably a mistake." - ) - - return GLOBAL_TRACE_PROVIDER.create_trace( - name=workflow_name, - trace_id=trace_id, - group_id=group_id, - metadata=metadata, - disabled=disabled, - ) - - -def get_current_trace() -> Trace | None: - """Returns the currently active trace, if present.""" - return GLOBAL_TRACE_PROVIDER.get_current_trace() - - -def get_current_span() -> Span[Any] | None: - """Returns the currently active span, if present.""" - return GLOBAL_TRACE_PROVIDER.get_current_span() - - -def agent_span( - name: str, - handoffs: list[str] | None = None, - tools: list[str] | None = None, - output_type: str | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[AgentSpanData]: - """Create a new agent span. The span will not be started automatically, you should either do - `with agent_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - name: The name of the agent. - handoffs: Optional list of agent names to which this agent could hand off control. - tools: Optional list of tool names available to this agent. - output_type: Optional name of the output type produced by the agent. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - - Returns: - The newly created agent span. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=AgentSpanData(name=name, handoffs=handoffs, tools=tools, output_type=output_type), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def function_span( - name: str, - input: str | None = None, - output: str | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[FunctionSpanData]: - """Create a new function span. The span will not be started automatically, you should either do - `with function_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - name: The name of the function. - input: The input to the function. - output: The output of the function. 
- span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - - Returns: - The newly created function span. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=FunctionSpanData(name=name, input=input, output=output), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def generation_span( - input: Sequence[Mapping[str, Any]] | None = None, - output: Sequence[Mapping[str, Any]] | None = None, - model: str | None = None, - model_config: Mapping[str, Any] | None = None, - usage: dict[str, Any] | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[GenerationSpanData]: - """Create a new generation span. The span will not be started automatically, you should either - do `with generation_span() ...` or call `span.start()` + `span.finish()` manually. - - This span captures the details of a model generation, including the - input message sequence, any generated outputs, the model name and - configuration, and usage data. If you only need to capture a model - response identifier, use `response_span()` instead. - - Args: - input: The sequence of input messages sent to the model. - output: The sequence of output messages received from the model. - model: The model identifier used for the generation. - model_config: The model configuration (hyperparameters) used. - usage: A dictionary of usage information (input tokens, output tokens, etc.). - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - - Returns: - The newly created generation span. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=GenerationSpanData( - input=input, - output=output, - model=model, - model_config=model_config, - usage=usage, - ), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def response_span( - response: Response | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[ResponseSpanData]: - """Create a new response span. The span will not be started automatically, you should either do - `with response_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - response: The OpenAI Response object. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. 
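A sketch of wrapping a raw model call in `generation_span()`, per the docstring above. A surrounding `trace()` provides the parent; the "model call" is faked, and `agents` import paths are assumed.

from agents.tracing import generation_span, trace

messages = [{"role": "user", "content": "Say hi"}]

with trace("generation_demo"):
    with generation_span(input=messages, model="gpt-4o") as span:
        # ... call the model here ...
        reply = [{"role": "assistant", "content": "Hi!"}]
        span.span_data.output = reply
        span.span_data.usage = {"input_tokens": 4, "output_tokens": 2}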
- """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=ResponseSpanData(response=response), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def handoff_span( - from_agent: str | None = None, - to_agent: str | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[HandoffSpanData]: - """Create a new handoff span. The span will not be started automatically, you should either do - `with handoff_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - from_agent: The name of the agent that is handing off. - to_agent: The name of the agent that is receiving the handoff. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - - Returns: - The newly created handoff span. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=HandoffSpanData(from_agent=from_agent, to_agent=to_agent), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def custom_span( - name: str, - data: dict[str, Any] | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[CustomSpanData]: - """Create a new custom span, to which you can add your own metadata. The span will not be - started automatically, you should either do `with custom_span() ...` or call - `span.start()` + `span.finish()` manually. - - Args: - name: The name of the custom span. - data: Arbitrary structured data to associate with the span. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - - Returns: - The newly created custom span. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=CustomSpanData(name=name, data=data or {}), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def guardrail_span( - name: str, - triggered: bool = False, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[GuardrailSpanData]: - """Create a new guardrail span. The span will not be started automatically, you should either - do `with guardrail_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - name: The name of the guardrail. - triggered: Whether the guardrail was triggered. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. 
- """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=GuardrailSpanData(name=name, triggered=triggered), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def transcription_span( - model: str | None = None, - input: str | None = None, - input_format: str | None = "pcm", - output: str | None = None, - model_config: Mapping[str, Any] | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[TranscriptionSpanData]: - """Create a new transcription span. The span will not be started automatically, you should - either do `with transcription_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - model: The name of the model used for the speech-to-text. - input: The audio input of the speech-to-text transcription, as a base64 encoded string of - audio bytes. - input_format: The format of the audio input (defaults to "pcm"). - output: The output of the speech-to-text transcription. - model_config: The model configuration (hyperparameters) used. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - - Returns: - The newly created speech-to-text span. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=TranscriptionSpanData( - input=input, - input_format=input_format, - output=output, - model=model, - model_config=model_config, - ), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def speech_span( - model: str | None = None, - input: str | None = None, - output: str | None = None, - output_format: str | None = "pcm", - model_config: Mapping[str, Any] | None = None, - first_content_at: str | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[SpeechSpanData]: - """Create a new speech span. The span will not be started automatically, you should either do - `with speech_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - model: The name of the model used for the text-to-speech. - input: The text input of the text-to-speech. - output: The audio output of the text-to-speech as base64 encoded string of PCM audio bytes. - output_format: The format of the audio output (defaults to "pcm"). - model_config: The model configuration (hyperparameters) used. - first_content_at: The time of the first byte of the audio output. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. 
- """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=SpeechSpanData( - model=model, - input=input, - output=output, - output_format=output_format, - model_config=model_config, - first_content_at=first_content_at, - ), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def speech_group_span( - input: str | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[SpeechGroupSpanData]: - """Create a new speech group span. The span will not be started automatically, you should - either do `with speech_group_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - input: The input text used for the speech request. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=SpeechGroupSpanData(input=input), - span_id=span_id, - parent=parent, - disabled=disabled, - ) - - -def mcp_tools_span( - server: str | None = None, - result: list[str] | None = None, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, -) -> Span[MCPListToolsSpanData]: - """Create a new MCP list tools span. The span will not be started automatically, you should - either do `with mcp_tools_span() ...` or call `span.start()` + `span.finish()` manually. - - Args: - server: The name of the MCP server. - result: The result of the MCP list tools call. - span_id: The ID of the span. Optional. If not provided, we will generate an ID. We - recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are - correctly formatted. - parent: The parent span or trace. If not provided, we will automatically use the current - trace/span as the parent. - disabled: If True, we will return a Span but the Span will not be recorded. - """ - return GLOBAL_TRACE_PROVIDER.create_span( - span_data=MCPListToolsSpanData(server=server, result=result), - span_id=span_id, - parent=parent, - disabled=disabled, - ) diff --git a/src/agents/tracing/logger.py b/src/agents/tracing/logger.py deleted file mode 100644 index 661d09b5..00000000 --- a/src/agents/tracing/logger.py +++ /dev/null @@ -1,3 +0,0 @@ -import logging - -logger = logging.getLogger("openai.agents.tracing") diff --git a/src/agents/tracing/processor_interface.py b/src/agents/tracing/processor_interface.py deleted file mode 100644 index 4dcd897c..00000000 --- a/src/agents/tracing/processor_interface.py +++ /dev/null @@ -1,69 +0,0 @@ -import abc -from typing import TYPE_CHECKING, Any - -if TYPE_CHECKING: - from .spans import Span - from .traces import Trace - - -class TracingProcessor(abc.ABC): - """Interface for processing spans.""" - - @abc.abstractmethod - def on_trace_start(self, trace: "Trace") -> None: - """Called when a trace is started. - - Args: - trace: The trace that started. - """ - pass - - @abc.abstractmethod - def on_trace_end(self, trace: "Trace") -> None: - """Called when a trace is finished. - - Args: - trace: The trace that started. - """ - pass - - @abc.abstractmethod - def on_span_start(self, span: "Span[Any]") -> None: - """Called when a span is started. - - Args: - span: The span that started. 
- """ - pass - - @abc.abstractmethod - def on_span_end(self, span: "Span[Any]") -> None: - """Called when a span is finished. Should not block or raise exceptions. - - Args: - span: The span that finished. - """ - pass - - @abc.abstractmethod - def shutdown(self) -> None: - """Called when the application stops.""" - pass - - @abc.abstractmethod - def force_flush(self) -> None: - """Forces an immediate flush of all queued spans/traces.""" - pass - - -class TracingExporter(abc.ABC): - """Exports traces and spans. For example, could log them or send them to a backend.""" - - @abc.abstractmethod - def export(self, items: list["Trace | Span[Any]"]) -> None: - """Exports a list of traces and spans. - - Args: - items: The items to export. - """ - pass diff --git a/src/agents/tracing/processors.py b/src/agents/tracing/processors.py deleted file mode 100644 index f929d05d..00000000 --- a/src/agents/tracing/processors.py +++ /dev/null @@ -1,276 +0,0 @@ -from __future__ import annotations - -import os -import queue -import random -import threading -import time -from functools import cached_property -from typing import Any - -import httpx - -from ..logger import logger -from .processor_interface import TracingExporter, TracingProcessor -from .spans import Span -from .traces import Trace - - -class ConsoleSpanExporter(TracingExporter): - """Prints the traces and spans to the console.""" - - def export(self, items: list[Trace | Span[Any]]) -> None: - for item in items: - if isinstance(item, Trace): - print(f"[Exporter] Export trace_id={item.trace_id}, name={item.name}, ") - else: - print(f"[Exporter] Export span: {item.export()}") - - -class BackendSpanExporter(TracingExporter): - def __init__( - self, - api_key: str | None = None, - organization: str | None = None, - project: str | None = None, - endpoint: str = "https://api.openai.com/v1/traces/ingest", - max_retries: int = 3, - base_delay: float = 1.0, - max_delay: float = 30.0, - ): - """ - Args: - api_key: The API key for the "Authorization" header. Defaults to - `os.environ["OPENAI_API_KEY"]` if not provided. - organization: The OpenAI organization to use. Defaults to - `os.environ["OPENAI_ORG_ID"]` if not provided. - project: The OpenAI project to use. Defaults to - `os.environ["OPENAI_PROJECT_ID"]` if not provided. - endpoint: The HTTP endpoint to which traces/spans are posted. - max_retries: Maximum number of retries upon failures. - base_delay: Base delay (in seconds) for the first backoff. - max_delay: Maximum delay (in seconds) for backoff growth. - """ - self._api_key = api_key - self._organization = organization - self._project = project - self.endpoint = endpoint - self.max_retries = max_retries - self.base_delay = base_delay - self.max_delay = max_delay - - # Keep a client open for connection pooling across multiple export calls - self._client = httpx.Client(timeout=httpx.Timeout(timeout=60, connect=5.0)) - - def set_api_key(self, api_key: str): - """Set the OpenAI API key for the exporter. - - Args: - api_key: The OpenAI API key to use. This is the same key used by the OpenAI Python - client. 
- """ - # We're specifically setting the underlying cached property as well - self._api_key = api_key - self.api_key = api_key - - @cached_property - def api_key(self): - return self._api_key or os.environ.get("OPENAI_API_KEY") - - @cached_property - def organization(self): - return self._organization or os.environ.get("OPENAI_ORG_ID") - - @cached_property - def project(self): - return self._project or os.environ.get("OPENAI_PROJECT_ID") - - def export(self, items: list[Trace | Span[Any]]) -> None: - if not items: - return - - if not self.api_key: - logger.warning("OPENAI_API_KEY is not set, skipping trace export") - return - - data = [item.export() for item in items if item.export()] - payload = {"data": data} - - headers = { - "Authorization": f"Bearer {self.api_key}", - "Content-Type": "application/json", - "OpenAI-Beta": "traces=v1", - } - - # Exponential backoff loop - attempt = 0 - delay = self.base_delay - while True: - attempt += 1 - try: - response = self._client.post(url=self.endpoint, headers=headers, json=payload) - - # If the response is successful, break out of the loop - if response.status_code < 300: - logger.debug(f"Exported {len(items)} items") - return - - # If the response is a client error (4xx), we wont retry - if 400 <= response.status_code < 500: - logger.error( - f"[non-fatal] Tracing client error {response.status_code}: {response.text}" - ) - return - - # For 5xx or other unexpected codes, treat it as transient and retry - logger.warning( - f"[non-fatal] Tracing: server error {response.status_code}, retrying." - ) - except httpx.RequestError as exc: - # Network or other I/O error, we'll retry - logger.warning(f"[non-fatal] Tracing: request failed: {exc}") - - # If we reach here, we need to retry or give up - if attempt >= self.max_retries: - logger.error("[non-fatal] Tracing: max retries reached, giving up on this batch.") - return - - # Exponential backoff + jitter - sleep_time = delay + random.uniform(0, 0.1 * delay) # 10% jitter - time.sleep(sleep_time) - delay = min(delay * 2, self.max_delay) - - def close(self): - """Close the underlying HTTP client.""" - self._client.close() - - -class BatchTraceProcessor(TracingProcessor): - """Some implementation notes: - 1. Using Queue, which is thread-safe. - 2. Using a background thread to export spans, to minimize any performance issues. - 3. Spans are stored in memory until they are exported. - """ - - def __init__( - self, - exporter: TracingExporter, - max_queue_size: int = 8192, - max_batch_size: int = 128, - schedule_delay: float = 5.0, - export_trigger_ratio: float = 0.7, - ): - """ - Args: - exporter: The exporter to use. - max_queue_size: The maximum number of spans to store in the queue. After this, we will - start dropping spans. - max_batch_size: The maximum number of spans to export in a single batch. - schedule_delay: The delay between checks for new spans to export. - export_trigger_ratio: The ratio of the queue size at which we will trigger an export. - """ - self._exporter = exporter - self._queue: queue.Queue[Trace | Span[Any]] = queue.Queue(maxsize=max_queue_size) - self._max_queue_size = max_queue_size - self._max_batch_size = max_batch_size - self._schedule_delay = schedule_delay - self._shutdown_event = threading.Event() - - # The queue size threshold at which we export immediately. 
- self._export_trigger_size = int(max_queue_size * export_trigger_ratio) - - # Track when we next *must* perform a scheduled export - self._next_export_time = time.time() + self._schedule_delay - - self._worker_thread = threading.Thread(target=self._run, daemon=True) - self._worker_thread.start() - - def on_trace_start(self, trace: Trace) -> None: - try: - self._queue.put_nowait(trace) - except queue.Full: - logger.warning("Queue is full, dropping trace.") - - def on_trace_end(self, trace: Trace) -> None: - # We send traces via on_trace_start, so we don't need to do anything here. - pass - - def on_span_start(self, span: Span[Any]) -> None: - # We send spans via on_span_end, so we don't need to do anything here. - pass - - def on_span_end(self, span: Span[Any]) -> None: - try: - self._queue.put_nowait(span) - except queue.Full: - logger.warning("Queue is full, dropping span.") - - def shutdown(self, timeout: float | None = None): - """ - Called when the application stops. We signal our thread to stop, then join it. - """ - self._shutdown_event.set() - self._worker_thread.join(timeout=timeout) - - def force_flush(self): - """ - Forces an immediate flush of all queued spans. - """ - self._export_batches(force=True) - - def _run(self): - while not self._shutdown_event.is_set(): - current_time = time.time() - queue_size = self._queue.qsize() - - # If it's time for a scheduled flush or queue is above the trigger threshold - if current_time >= self._next_export_time or queue_size >= self._export_trigger_size: - self._export_batches(force=False) - # Reset the next scheduled flush time - self._next_export_time = time.time() + self._schedule_delay - else: - # Sleep a short interval so we don't busy-wait. - time.sleep(0.2) - - # Final drain after shutdown - self._export_batches(force=True) - - def _export_batches(self, force: bool = False): - """Drains the queue and exports in batches. If force=True, export everything. - Otherwise, export up to `max_batch_size` repeatedly until the queue is empty or below a - certain threshold. 
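A sketch of swapping the default pipeline for a custom exporter plus batch processor, using the constructor arguments documented above. The endpoint URL and API key are hypothetical placeholders; `agents` import paths are assumed.

from agents.tracing import set_trace_processors
from agents.tracing.processors import BackendSpanExporter, BatchTraceProcessor

exporter = BackendSpanExporter(
    api_key="sk-...",                                   # placeholder key
    endpoint="https://traces.example.internal/ingest",  # hypothetical self-hosted endpoint
    max_retries=5,
)
processor = BatchTraceProcessor(
    exporter,
    max_batch_size=64,   # smaller batches, exported more often
    schedule_delay=2.0,
)

# Replace the default processor entirely; add_trace_processor() would append instead.
set_trace_processors([processor])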
- """ - while True: - items_to_export: list[Span[Any] | Trace] = [] - - # Gather a batch of spans up to max_batch_size - while not self._queue.empty() and ( - force or len(items_to_export) < self._max_batch_size - ): - try: - items_to_export.append(self._queue.get_nowait()) - except queue.Empty: - # Another thread might have emptied the queue between checks - break - - # If we collected nothing, we're done - if not items_to_export: - break - - # Export the batch - self._exporter.export(items_to_export) - - -# Create a shared global instance: -_global_exporter = BackendSpanExporter() -_global_processor = BatchTraceProcessor(_global_exporter) - - -def default_exporter() -> BackendSpanExporter: - """The default exporter, which exports traces and spans to the backend in batches.""" - return _global_exporter - - -def default_processor() -> BatchTraceProcessor: - """The default processor, which exports traces and spans to the backend in batches.""" - return _global_processor diff --git a/src/agents/tracing/scope.py b/src/agents/tracing/scope.py deleted file mode 100644 index 1d31c1bd..00000000 --- a/src/agents/tracing/scope.py +++ /dev/null @@ -1,49 +0,0 @@ -# Holds the current active span -import contextvars -from typing import TYPE_CHECKING, Any - -from ..logger import logger - -if TYPE_CHECKING: - from .spans import Span - from .traces import Trace - -_current_span: contextvars.ContextVar["Span[Any] | None"] = contextvars.ContextVar( - "current_span", default=None -) - -_current_trace: contextvars.ContextVar["Trace | None"] = contextvars.ContextVar( - "current_trace", default=None -) - - -class Scope: - """ - Manages the current span and trace in the context. - """ - - @classmethod - def get_current_span(cls) -> "Span[Any] | None": - return _current_span.get() - - @classmethod - def set_current_span(cls, span: "Span[Any] | None") -> "contextvars.Token[Span[Any] | None]": - return _current_span.set(span) - - @classmethod - def reset_current_span(cls, token: "contextvars.Token[Span[Any] | None]") -> None: - _current_span.reset(token) - - @classmethod - def get_current_trace(cls) -> "Trace | None": - return _current_trace.get() - - @classmethod - def set_current_trace(cls, trace: "Trace | None") -> "contextvars.Token[Trace | None]": - logger.debug(f"Setting current trace: {trace.trace_id if trace else None}") - return _current_trace.set(trace) - - @classmethod - def reset_current_trace(cls, token: "contextvars.Token[Trace | None]") -> None: - logger.debug("Resetting current trace") - _current_trace.reset(token) diff --git a/src/agents/tracing/setup.py b/src/agents/tracing/setup.py deleted file mode 100644 index 3a7c6ade..00000000 --- a/src/agents/tracing/setup.py +++ /dev/null @@ -1,211 +0,0 @@ -from __future__ import annotations - -import os -import threading -from typing import Any - -from ..logger import logger -from . import util -from .processor_interface import TracingProcessor -from .scope import Scope -from .spans import NoOpSpan, Span, SpanImpl, TSpanData -from .traces import NoOpTrace, Trace, TraceImpl - - -class SynchronousMultiTracingProcessor(TracingProcessor): - """ - Forwards all calls to a list of TracingProcessors, in order of registration. - """ - - def __init__(self): - # Using a tuple to avoid race conditions when iterating over processors - self._processors: tuple[TracingProcessor, ...] = () - self._lock = threading.Lock() - - def add_tracing_processor(self, tracing_processor: TracingProcessor): - """ - Add a processor to the list of processors. 
Each processor will receive all traces/spans. - """ - with self._lock: - self._processors += (tracing_processor,) - - def set_processors(self, processors: list[TracingProcessor]): - """ - Set the list of processors. This will replace the current list of processors. - """ - with self._lock: - self._processors = tuple(processors) - - def on_trace_start(self, trace: Trace) -> None: - """ - Called when a trace is started. - """ - for processor in self._processors: - processor.on_trace_start(trace) - - def on_trace_end(self, trace: Trace) -> None: - """ - Called when a trace is finished. - """ - for processor in self._processors: - processor.on_trace_end(trace) - - def on_span_start(self, span: Span[Any]) -> None: - """ - Called when a span is started. - """ - for processor in self._processors: - processor.on_span_start(span) - - def on_span_end(self, span: Span[Any]) -> None: - """ - Called when a span is finished. - """ - for processor in self._processors: - processor.on_span_end(span) - - def shutdown(self) -> None: - """ - Called when the application stops. - """ - for processor in self._processors: - logger.debug(f"Shutting down trace processor {processor}") - processor.shutdown() - - def force_flush(self): - """ - Force the processors to flush their buffers. - """ - for processor in self._processors: - processor.force_flush() - - -class TraceProvider: - def __init__(self): - self._multi_processor = SynchronousMultiTracingProcessor() - self._disabled = os.environ.get("OPENAI_AGENTS_DISABLE_TRACING", "false").lower() in ( - "true", - "1", - ) - - def register_processor(self, processor: TracingProcessor): - """ - Add a processor to the list of processors. Each processor will receive all traces/spans. - """ - self._multi_processor.add_tracing_processor(processor) - - def set_processors(self, processors: list[TracingProcessor]): - """ - Set the list of processors. This will replace the current list of processors. - """ - self._multi_processor.set_processors(processors) - - def get_current_trace(self) -> Trace | None: - """ - Returns the currently active trace, if any. - """ - return Scope.get_current_trace() - - def get_current_span(self) -> Span[Any] | None: - """ - Returns the currently active span, if any. - """ - return Scope.get_current_span() - - def set_disabled(self, disabled: bool) -> None: - """ - Set whether tracing is disabled. - """ - self._disabled = disabled - - def create_trace( - self, - name: str, - trace_id: str | None = None, - group_id: str | None = None, - metadata: dict[str, Any] | None = None, - disabled: bool = False, - ) -> Trace: - """ - Create a new trace. - """ - if self._disabled or disabled: - logger.debug(f"Tracing is disabled. Not creating trace {name}") - return NoOpTrace() - - trace_id = trace_id or util.gen_trace_id() - - logger.debug(f"Creating trace {name} with id {trace_id}") - - return TraceImpl( - name=name, - trace_id=trace_id, - group_id=group_id, - metadata=metadata, - processor=self._multi_processor, - ) - - def create_span( - self, - span_data: TSpanData, - span_id: str | None = None, - parent: Trace | Span[Any] | None = None, - disabled: bool = False, - ) -> Span[TSpanData]: - """ - Create a new span. - """ - if self._disabled or disabled: - logger.debug(f"Tracing is disabled. Not creating span {span_data}") - return NoOpSpan(span_data) - - if not parent: - current_span = Scope.get_current_span() - current_trace = Scope.get_current_trace() - if current_trace is None: - logger.error( - "No active trace. 
Make sure to start a trace with `trace()` first" - "Returning NoOpSpan." - ) - return NoOpSpan(span_data) - elif isinstance(current_trace, NoOpTrace) or isinstance(current_span, NoOpSpan): - logger.debug( - f"Parent {current_span} or {current_trace} is no-op, returning NoOpSpan" - ) - return NoOpSpan(span_data) - - parent_id = current_span.span_id if current_span else None - trace_id = current_trace.trace_id - - elif isinstance(parent, Trace): - if isinstance(parent, NoOpTrace): - logger.debug(f"Parent {parent} is no-op, returning NoOpSpan") - return NoOpSpan(span_data) - trace_id = parent.trace_id - parent_id = None - elif isinstance(parent, Span): - if isinstance(parent, NoOpSpan): - logger.debug(f"Parent {parent} is no-op, returning NoOpSpan") - return NoOpSpan(span_data) - parent_id = parent.span_id - trace_id = parent.trace_id - - logger.debug(f"Creating span {span_data} with id {span_id}") - - return SpanImpl( - trace_id=trace_id, - span_id=span_id, - parent_id=parent_id, - processor=self._multi_processor, - span_data=span_data, - ) - - def shutdown(self) -> None: - try: - logger.debug("Shutting down trace provider") - self._multi_processor.shutdown() - except Exception as e: - logger.error(f"Error shutting down trace provider: {e}") - - -GLOBAL_TRACE_PROVIDER = TraceProvider() diff --git a/src/agents/tracing/span_data.py b/src/agents/tracing/span_data.py deleted file mode 100644 index 260e4c45..00000000 --- a/src/agents/tracing/span_data.py +++ /dev/null @@ -1,374 +0,0 @@ -from __future__ import annotations - -import abc -from collections.abc import Mapping, Sequence -from typing import TYPE_CHECKING, Any - -if TYPE_CHECKING: - from openai.types.responses import Response, ResponseInputItemParam - - -class SpanData(abc.ABC): - """ - Represents span data in the trace. - """ - - @abc.abstractmethod - def export(self) -> dict[str, Any]: - """Export the span data as a dictionary.""" - pass - - @property - @abc.abstractmethod - def type(self) -> str: - """Return the type of the span.""" - pass - - -class AgentSpanData(SpanData): - """ - Represents an Agent Span in the trace. - Includes name, handoffs, tools, and output type. - """ - - __slots__ = ("name", "handoffs", "tools", "output_type") - - def __init__( - self, - name: str, - handoffs: list[str] | None = None, - tools: list[str] | None = None, - output_type: str | None = None, - ): - self.name = name - self.handoffs: list[str] | None = handoffs - self.tools: list[str] | None = tools - self.output_type: str | None = output_type - - @property - def type(self) -> str: - return "agent" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "name": self.name, - "handoffs": self.handoffs, - "tools": self.tools, - "output_type": self.output_type, - } - - -class FunctionSpanData(SpanData): - """ - Represents a Function Span in the trace. - Includes input, output and MCP data (if applicable). - """ - - __slots__ = ("name", "input", "output", "mcp_data") - - def __init__( - self, - name: str, - input: str | None, - output: Any | None, - mcp_data: dict[str, Any] | None = None, - ): - self.name = name - self.input = input - self.output = output - self.mcp_data = mcp_data - - @property - def type(self) -> str: - return "function" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "name": self.name, - "input": self.input, - "output": str(self.output) if self.output else None, - "mcp_data": self.mcp_data, - } - - -class GenerationSpanData(SpanData): - """ - Represents a Generation Span in the trace. 
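A sketch of how parenting works in `TraceProvider.create_span` above: with no ambient trace a NoOpSpan comes back and nothing is recorded, while `parent=` attaches a span explicitly. `agents` import paths are assumed.

from agents.tracing import custom_span, function_span, trace

# No active trace here, so this span is a no-op and records nothing.
orphan = custom_span("outside_any_trace")
orphan.start()
orphan.finish()

with trace("parenting_demo") as t:
    # Parent resolved from the current trace/span automatically...
    with custom_span("implicit_parent"):
        pass
    # ...or supplied explicitly, here attaching directly to the trace.
    with function_span("explicit_parent", input="x", parent=t):
        pass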
- Includes input, output, model, model configuration, and usage. - """ - - __slots__ = ( - "input", - "output", - "model", - "model_config", - "usage", - ) - - def __init__( - self, - input: Sequence[Mapping[str, Any]] | None = None, - output: Sequence[Mapping[str, Any]] | None = None, - model: str | None = None, - model_config: Mapping[str, Any] | None = None, - usage: dict[str, Any] | None = None, - ): - self.input = input - self.output = output - self.model = model - self.model_config = model_config - self.usage = usage - - @property - def type(self) -> str: - return "generation" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "input": self.input, - "output": self.output, - "model": self.model, - "model_config": self.model_config, - "usage": self.usage, - } - - -class ResponseSpanData(SpanData): - """ - Represents a Response Span in the trace. - Includes response and input. - """ - - __slots__ = ("response", "input") - - def __init__( - self, - response: Response | None = None, - input: str | list[ResponseInputItemParam] | None = None, - ) -> None: - self.response = response - # This is not used by the OpenAI trace processors, but is useful for other tracing - # processor implementations - self.input = input - - @property - def type(self) -> str: - return "response" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "response_id": self.response.id if self.response else None, - } - - -class HandoffSpanData(SpanData): - """ - Represents a Handoff Span in the trace. - Includes source and desitnation agents. - """ - - __slots__ = ("from_agent", "to_agent") - - def __init__(self, from_agent: str | None, to_agent: str | None): - self.from_agent = from_agent - self.to_agent = to_agent - - @property - def type(self) -> str: - return "handoff" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "from_agent": self.from_agent, - "to_agent": self.to_agent, - } - - -class CustomSpanData(SpanData): - """ - Represents a Custom Span in the trace. - Includes name and data property bag. - """ - - __slots__ = ("name", "data") - - def __init__(self, name: str, data: dict[str, Any]): - self.name = name - self.data = data - - @property - def type(self) -> str: - return "custom" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "name": self.name, - "data": self.data, - } - - -class GuardrailSpanData(SpanData): - """ - Represents a Guardrail Span in the trace. - Includes name and triggered status. - """ - - __slots__ = ("name", "triggered") - - def __init__(self, name: str, triggered: bool = False): - self.name = name - self.triggered = triggered - - @property - def type(self) -> str: - return "guardrail" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "name": self.name, - "triggered": self.triggered, - } - - -class TranscriptionSpanData(SpanData): - """ - Represents a Transcription Span in the trace. - Includes input, output, model, and model configuration. 
- """ - - __slots__ = ( - "input", - "output", - "model", - "model_config", - ) - - def __init__( - self, - input: str | None = None, - input_format: str | None = "pcm", - output: str | None = None, - model: str | None = None, - model_config: Mapping[str, Any] | None = None, - ): - self.input = input - self.input_format = input_format - self.output = output - self.model = model - self.model_config = model_config - - @property - def type(self) -> str: - return "transcription" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "input": { - "data": self.input or "", - "format": self.input_format, - }, - "output": self.output, - "model": self.model, - "model_config": self.model_config, - } - - -class SpeechSpanData(SpanData): - """ - Represents a Speech Span in the trace. - Includes input, output, model, model configuration, and first content timestamp. - """ - - __slots__ = ("input", "output", "model", "model_config", "first_content_at") - - def __init__( - self, - input: str | None = None, - output: str | None = None, - output_format: str | None = "pcm", - model: str | None = None, - model_config: Mapping[str, Any] | None = None, - first_content_at: str | None = None, - ): - self.input = input - self.output = output - self.output_format = output_format - self.model = model - self.model_config = model_config - self.first_content_at = first_content_at - - @property - def type(self) -> str: - return "speech" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "input": self.input, - "output": { - "data": self.output or "", - "format": self.output_format, - }, - "model": self.model, - "model_config": self.model_config, - "first_content_at": self.first_content_at, - } - - -class SpeechGroupSpanData(SpanData): - """ - Represents a Speech Group Span in the trace. - """ - - __slots__ = "input" - - def __init__( - self, - input: str | None = None, - ): - self.input = input - - @property - def type(self) -> str: - return "speech-group" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "input": self.input, - } - - -class MCPListToolsSpanData(SpanData): - """ - Represents an MCP List Tools Span in the trace. - Includes server and result. - """ - - __slots__ = ( - "server", - "result", - ) - - def __init__(self, server: str | None = None, result: list[str] | None = None): - self.server = server - self.result = result - - @property - def type(self) -> str: - return "mcp_tools" - - def export(self) -> dict[str, Any]: - return { - "type": self.type, - "server": self.server, - "result": self.result, - } diff --git a/src/agents/tracing/spans.py b/src/agents/tracing/spans.py deleted file mode 100644 index ee933e73..00000000 --- a/src/agents/tracing/spans.py +++ /dev/null @@ -1,264 +0,0 @@ -from __future__ import annotations - -import abc -import contextvars -from typing import Any, Generic, TypeVar - -from typing_extensions import TypedDict - -from ..logger import logger -from . 
import util -from .processor_interface import TracingProcessor -from .scope import Scope -from .span_data import SpanData - -TSpanData = TypeVar("TSpanData", bound=SpanData) - - -class SpanError(TypedDict): - message: str - data: dict[str, Any] | None - - -class Span(abc.ABC, Generic[TSpanData]): - @property - @abc.abstractmethod - def trace_id(self) -> str: - pass - - @property - @abc.abstractmethod - def span_id(self) -> str: - pass - - @property - @abc.abstractmethod - def span_data(self) -> TSpanData: - pass - - @abc.abstractmethod - def start(self, mark_as_current: bool = False): - """ - Start the span. - - Args: - mark_as_current: If true, the span will be marked as the current span. - """ - pass - - @abc.abstractmethod - def finish(self, reset_current: bool = False) -> None: - """ - Finish the span. - - Args: - reset_current: If true, the span will be reset as the current span. - """ - pass - - @abc.abstractmethod - def __enter__(self) -> Span[TSpanData]: - pass - - @abc.abstractmethod - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - @property - @abc.abstractmethod - def parent_id(self) -> str | None: - pass - - @abc.abstractmethod - def set_error(self, error: SpanError) -> None: - pass - - @property - @abc.abstractmethod - def error(self) -> SpanError | None: - pass - - @abc.abstractmethod - def export(self) -> dict[str, Any] | None: - pass - - @property - @abc.abstractmethod - def started_at(self) -> str | None: - pass - - @property - @abc.abstractmethod - def ended_at(self) -> str | None: - pass - - -class NoOpSpan(Span[TSpanData]): - __slots__ = ("_span_data", "_prev_span_token") - - def __init__(self, span_data: TSpanData): - self._span_data = span_data - self._prev_span_token: contextvars.Token[Span[TSpanData] | None] | None = None - - @property - def trace_id(self) -> str: - return "no-op" - - @property - def span_id(self) -> str: - return "no-op" - - @property - def span_data(self) -> TSpanData: - return self._span_data - - @property - def parent_id(self) -> str | None: - return None - - def start(self, mark_as_current: bool = False): - if mark_as_current: - self._prev_span_token = Scope.set_current_span(self) - - def finish(self, reset_current: bool = False) -> None: - if reset_current and self._prev_span_token is not None: - Scope.reset_current_span(self._prev_span_token) - self._prev_span_token = None - - def __enter__(self) -> Span[TSpanData]: - self.start(mark_as_current=True) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - reset_current = True - if exc_type is GeneratorExit: - logger.debug("GeneratorExit, skipping span reset") - reset_current = False - - self.finish(reset_current=reset_current) - - def set_error(self, error: SpanError) -> None: - pass - - @property - def error(self) -> SpanError | None: - return None - - def export(self) -> dict[str, Any] | None: - return None - - @property - def started_at(self) -> str | None: - return None - - @property - def ended_at(self) -> str | None: - return None - - -class SpanImpl(Span[TSpanData]): - __slots__ = ( - "_trace_id", - "_span_id", - "_parent_id", - "_started_at", - "_ended_at", - "_error", - "_prev_span_token", - "_processor", - "_span_data", - ) - - def __init__( - self, - trace_id: str, - span_id: str | None, - parent_id: str | None, - processor: TracingProcessor, - span_data: TSpanData, - ): - self._trace_id = trace_id - self._span_id = span_id or util.gen_span_id() - self._parent_id = parent_id - self._started_at: str | None = None - self._ended_at: str | None = None - 
self._processor = processor - self._error: SpanError | None = None - self._prev_span_token: contextvars.Token[Span[TSpanData] | None] | None = None - self._span_data = span_data - - @property - def trace_id(self) -> str: - return self._trace_id - - @property - def span_id(self) -> str: - return self._span_id - - @property - def span_data(self) -> TSpanData: - return self._span_data - - @property - def parent_id(self) -> str | None: - return self._parent_id - - def start(self, mark_as_current: bool = False): - if self.started_at is not None: - logger.warning("Span already started") - return - - self._started_at = util.time_iso() - self._processor.on_span_start(self) - if mark_as_current: - self._prev_span_token = Scope.set_current_span(self) - - def finish(self, reset_current: bool = False) -> None: - if self.ended_at is not None: - logger.warning("Span already finished") - return - - self._ended_at = util.time_iso() - self._processor.on_span_end(self) - if reset_current and self._prev_span_token is not None: - Scope.reset_current_span(self._prev_span_token) - self._prev_span_token = None - - def __enter__(self) -> Span[TSpanData]: - self.start(mark_as_current=True) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - reset_current = True - if exc_type is GeneratorExit: - logger.debug("GeneratorExit, skipping span reset") - reset_current = False - - self.finish(reset_current=reset_current) - - def set_error(self, error: SpanError) -> None: - self._error = error - - @property - def error(self) -> SpanError | None: - return self._error - - @property - def started_at(self) -> str | None: - return self._started_at - - @property - def ended_at(self) -> str | None: - return self._ended_at - - def export(self) -> dict[str, Any] | None: - return { - "object": "trace.span", - "id": self.span_id, - "trace_id": self.trace_id, - "parent_id": self._parent_id, - "started_at": self._started_at, - "ended_at": self._ended_at, - "span_data": self.span_data.export(), - "error": self._error, - } diff --git a/src/agents/tracing/traces.py b/src/agents/tracing/traces.py deleted file mode 100644 index 53d06284..00000000 --- a/src/agents/tracing/traces.py +++ /dev/null @@ -1,195 +0,0 @@ -from __future__ import annotations - -import abc -import contextvars -from typing import Any - -from ..logger import logger -from . import util -from .processor_interface import TracingProcessor -from .scope import Scope - - -class Trace: - """ - A trace is the root level object that tracing creates. It represents a logical "workflow". - """ - - @abc.abstractmethod - def __enter__(self) -> Trace: - pass - - @abc.abstractmethod - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - @abc.abstractmethod - def start(self, mark_as_current: bool = False): - """ - Start the trace. - - Args: - mark_as_current: If true, the trace will be marked as the current trace. - """ - pass - - @abc.abstractmethod - def finish(self, reset_current: bool = False): - """ - Finish the trace. - - Args: - reset_current: If true, the trace will be reset as the current trace. - """ - pass - - @property - @abc.abstractmethod - def trace_id(self) -> str: - """ - The trace ID. - """ - pass - - @property - @abc.abstractmethod - def name(self) -> str: - """ - The name of the workflow being traced. - """ - pass - - @abc.abstractmethod - def export(self) -> dict[str, Any] | None: - """ - Export the trace as a dictionary. - """ - pass - - -class NoOpTrace(Trace): - """ - A no-op trace that will not be recorded. 
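A sketch of the manual Span lifecycle implemented by SpanImpl above (start with `mark_as_current`, finish with `reset_current`), as an alternative to the context-manager form. `agents` import paths are assumed.

from agents.tracing import custom_span, get_current_span, trace

with trace("lifecycle_demo"):
    span = custom_span("manual_step")
    span.start(mark_as_current=True)
    try:
        assert get_current_span() is span  # the span is now the ambient parent
        # ... do the work this span measures ...
        span.span_data.data["items_processed"] = 3
    finally:
        span.finish(reset_current=True)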
- """ - - def __init__(self): - self._started = False - self._prev_context_token: contextvars.Token[Trace | None] | None = None - - def __enter__(self) -> Trace: - if self._started: - if not self._prev_context_token: - logger.error("Trace already started but no context token set") - return self - - self._started = True - self.start(mark_as_current=True) - - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.finish(reset_current=True) - - def start(self, mark_as_current: bool = False): - if mark_as_current: - self._prev_context_token = Scope.set_current_trace(self) - - def finish(self, reset_current: bool = False): - if reset_current and self._prev_context_token is not None: - Scope.reset_current_trace(self._prev_context_token) - self._prev_context_token = None - - @property - def trace_id(self) -> str: - return "no-op" - - @property - def name(self) -> str: - return "no-op" - - def export(self) -> dict[str, Any] | None: - return None - - -NO_OP_TRACE = NoOpTrace() - - -class TraceImpl(Trace): - """ - A trace that will be recorded by the tracing library. - """ - - __slots__ = ( - "_name", - "_trace_id", - "group_id", - "metadata", - "_prev_context_token", - "_processor", - "_started", - ) - - def __init__( - self, - name: str, - trace_id: str | None, - group_id: str | None, - metadata: dict[str, Any] | None, - processor: TracingProcessor, - ): - self._name = name - self._trace_id = trace_id or util.gen_trace_id() - self.group_id = group_id - self.metadata = metadata - self._prev_context_token: contextvars.Token[Trace | None] | None = None - self._processor = processor - self._started = False - - @property - def trace_id(self) -> str: - return self._trace_id - - @property - def name(self) -> str: - return self._name - - def start(self, mark_as_current: bool = False): - if self._started: - return - - self._started = True - self._processor.on_trace_start(self) - - if mark_as_current: - self._prev_context_token = Scope.set_current_trace(self) - - def finish(self, reset_current: bool = False): - if not self._started: - return - - self._processor.on_trace_end(self) - - if reset_current and self._prev_context_token is not None: - Scope.reset_current_trace(self._prev_context_token) - self._prev_context_token = None - - def __enter__(self) -> Trace: - if self._started: - if not self._prev_context_token: - logger.error("Trace already started but no context token set") - return self - - self.start(mark_as_current=True) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.finish(reset_current=exc_type is not GeneratorExit) - - def export(self) -> dict[str, Any] | None: - return { - "object": "trace", - "id": self.trace_id, - "workflow_name": self.name, - "group_id": self.group_id, - "metadata": self.metadata, - } diff --git a/src/agents/tracing/util.py b/src/agents/tracing/util.py deleted file mode 100644 index f546b4e5..00000000 --- a/src/agents/tracing/util.py +++ /dev/null @@ -1,22 +0,0 @@ -import uuid -from datetime import datetime, timezone - - -def time_iso() -> str: - """Returns the current time in ISO 8601 format.""" - return datetime.now(timezone.utc).isoformat() - - -def gen_trace_id() -> str: - """Generates a new trace ID.""" - return f"trace_{uuid.uuid4().hex}" - - -def gen_span_id() -> str: - """Generates a new span ID.""" - return f"span_{uuid.uuid4().hex[:24]}" - - -def gen_group_id() -> str: - """Generates a new group ID.""" - return f"group_{uuid.uuid4().hex[:24]}" diff --git a/src/agents/usage.py b/src/agents/usage.py deleted file mode 100644 
index 23d989b4..00000000 --- a/src/agents/usage.py +++ /dev/null @@ -1,22 +0,0 @@ -from dataclasses import dataclass - - -@dataclass -class Usage: - requests: int = 0 - """Total requests made to the LLM API.""" - - input_tokens: int = 0 - """Total input tokens sent, across all requests.""" - - output_tokens: int = 0 - """Total output tokens received, across all requests.""" - - total_tokens: int = 0 - """Total tokens sent and received, across all requests.""" - - def add(self, other: "Usage") -> None: - self.requests += other.requests if other.requests else 0 - self.input_tokens += other.input_tokens if other.input_tokens else 0 - self.output_tokens += other.output_tokens if other.output_tokens else 0 - self.total_tokens += other.total_tokens if other.total_tokens else 0 diff --git a/src/agents/util/__init__.py b/src/agents/util/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agents/util/_coro.py b/src/agents/util/_coro.py deleted file mode 100644 index 647ab86a..00000000 --- a/src/agents/util/_coro.py +++ /dev/null @@ -1,2 +0,0 @@ -async def noop_coroutine() -> None: - pass diff --git a/src/agents/util/_error_tracing.py b/src/agents/util/_error_tracing.py deleted file mode 100644 index 09dbb1de..00000000 --- a/src/agents/util/_error_tracing.py +++ /dev/null @@ -1,16 +0,0 @@ -from typing import Any - -from ..logger import logger -from ..tracing import Span, SpanError, get_current_span - - -def attach_error_to_span(span: Span[Any], error: SpanError) -> None: - span.set_error(error) - - -def attach_error_to_current_span(error: SpanError) -> None: - span = get_current_span() - if span: - attach_error_to_span(span, error) - else: - logger.warning(f"No span to add error {error} to") diff --git a/src/agents/util/_json.py b/src/agents/util/_json.py deleted file mode 100644 index 1e081f68..00000000 --- a/src/agents/util/_json.py +++ /dev/null @@ -1,31 +0,0 @@ -from __future__ import annotations - -from typing import Literal - -from pydantic import TypeAdapter, ValidationError -from typing_extensions import TypeVar - -from ..exceptions import ModelBehaviorError -from ..tracing import SpanError -from ._error_tracing import attach_error_to_current_span - -T = TypeVar("T") - - -def validate_json(json_str: str, type_adapter: TypeAdapter[T], partial: bool) -> T: - partial_setting: bool | Literal["off", "on", "trailing-strings"] = ( - "trailing-strings" if partial else False - ) - try: - validated = type_adapter.validate_json(json_str, experimental_allow_partial=partial_setting) - return validated - except ValidationError as e: - attach_error_to_current_span( - SpanError( - message="Invalid JSON provided", - data={}, - ) - ) - raise ModelBehaviorError( - f"Invalid JSON when parsing {json_str} for {type_adapter}; {e}" - ) from e diff --git a/src/agents/util/_pretty_print.py b/src/agents/util/_pretty_print.py deleted file mode 100644 index afd3e2b1..00000000 --- a/src/agents/util/_pretty_print.py +++ /dev/null @@ -1,56 +0,0 @@ -from typing import TYPE_CHECKING - -from pydantic import BaseModel - -if TYPE_CHECKING: - from ..result import RunResult, RunResultBase, RunResultStreaming - - -def _indent(text: str, indent_level: int) -> str: - indent_string = " " * indent_level - return "\n".join(f"{indent_string}{line}" for line in text.splitlines()) - - -def _final_output_str(result: "RunResultBase") -> str: - if result.final_output is None: - return "None" - elif isinstance(result.final_output, str): - return result.final_output - elif isinstance(result.final_output, BaseModel): 
- return result.final_output.model_dump_json(indent=2) - else: - return str(result.final_output) - - -def pretty_print_result(result: "RunResult") -> str: - output = "RunResult:" - output += f'\n- Last agent: Agent(name="{result.last_agent.name}", ...)' - output += ( - f"\n- Final output ({type(result.final_output).__name__}):\n" - f"{_indent(_final_output_str(result), 2)}" - ) - output += f"\n- {len(result.new_items)} new item(s)" - output += f"\n- {len(result.raw_responses)} raw response(s)" - output += f"\n- {len(result.input_guardrail_results)} input guardrail result(s)" - output += f"\n- {len(result.output_guardrail_results)} output guardrail result(s)" - output += "\n(See `RunResult` for more details)" - - return output - - -def pretty_print_run_result_streaming(result: "RunResultStreaming") -> str: - output = "RunResultStreaming:" - output += f'\n- Current agent: Agent(name="{result.current_agent.name}", ...)' - output += f"\n- Current turn: {result.current_turn}" - output += f"\n- Max turns: {result.max_turns}" - output += f"\n- Is complete: {result.is_complete}" - output += ( - f"\n- Final output ({type(result.final_output).__name__}):\n" - f"{_indent(_final_output_str(result), 2)}" - ) - output += f"\n- {len(result.new_items)} new item(s)" - output += f"\n- {len(result.raw_responses)} raw response(s)" - output += f"\n- {len(result.input_guardrail_results)} input guardrail result(s)" - output += f"\n- {len(result.output_guardrail_results)} output guardrail result(s)" - output += "\n(See `RunResultStreaming` for more details)" - return output diff --git a/src/agents/util/_transforms.py b/src/agents/util/_transforms.py deleted file mode 100644 index b303074d..00000000 --- a/src/agents/util/_transforms.py +++ /dev/null @@ -1,11 +0,0 @@ -import re - - -def transform_string_function_style(name: str) -> str: - # Replace spaces with underscores - name = name.replace(" ", "_") - - # Replace non-alphanumeric characters with underscores - name = re.sub(r"[^a-zA-Z0-9]", "_", name) - - return name.lower() diff --git a/src/agents/util/_types.py b/src/agents/util/_types.py deleted file mode 100644 index 8571a694..00000000 --- a/src/agents/util/_types.py +++ /dev/null @@ -1,7 +0,0 @@ -from collections.abc import Awaitable -from typing import Union - -from typing_extensions import TypeVar - -T = TypeVar("T") -MaybeAwaitable = Union[Awaitable[T], T] diff --git a/src/agents/version.py b/src/agents/version.py deleted file mode 100644 index 9b22499e..00000000 --- a/src/agents/version.py +++ /dev/null @@ -1,7 +0,0 @@ -import importlib.metadata - -try: - __version__ = importlib.metadata.version("openai-agents") -except importlib.metadata.PackageNotFoundError: - # Fallback if running from source without being installed - __version__ = "0.0.0" diff --git a/src/agents/voice/__init__.py b/src/agents/voice/__init__.py deleted file mode 100644 index 499c064c..00000000 --- a/src/agents/voice/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -from .events import VoiceStreamEvent, VoiceStreamEventAudio, VoiceStreamEventLifecycle -from .exceptions import STTWebsocketConnectionError -from .input import AudioInput, StreamedAudioInput -from .model import ( - StreamedTranscriptionSession, - STTModel, - STTModelSettings, - TTSModel, - TTSModelSettings, - VoiceModelProvider, -) -from .models.openai_model_provider import OpenAIVoiceModelProvider -from .models.openai_stt import OpenAISTTModel, OpenAISTTTranscriptionSession -from .models.openai_tts import OpenAITTSModel -from .pipeline import VoicePipeline -from 
.pipeline_config import VoicePipelineConfig -from .result import StreamedAudioResult -from .utils import get_sentence_based_splitter -from .workflow import ( - SingleAgentVoiceWorkflow, - SingleAgentWorkflowCallbacks, - VoiceWorkflowBase, - VoiceWorkflowHelper, -) - -__all__ = [ - "AudioInput", - "StreamedAudioInput", - "STTModel", - "STTModelSettings", - "TTSModel", - "TTSModelSettings", - "VoiceModelProvider", - "StreamedAudioResult", - "SingleAgentVoiceWorkflow", - "OpenAIVoiceModelProvider", - "OpenAISTTModel", - "OpenAITTSModel", - "VoiceStreamEventAudio", - "VoiceStreamEventLifecycle", - "VoiceStreamEvent", - "VoicePipeline", - "VoicePipelineConfig", - "get_sentence_based_splitter", - "VoiceWorkflowHelper", - "VoiceWorkflowBase", - "SingleAgentWorkflowCallbacks", - "StreamedTranscriptionSession", - "OpenAISTTTranscriptionSession", - "STTWebsocketConnectionError", -] diff --git a/src/agents/voice/events.py b/src/agents/voice/events.py deleted file mode 100644 index bdcd0815..00000000 --- a/src/agents/voice/events.py +++ /dev/null @@ -1,47 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from typing import Literal, Union - -from typing_extensions import TypeAlias - -from .imports import np, npt - - -@dataclass -class VoiceStreamEventAudio: - """Streaming event from the VoicePipeline""" - - data: npt.NDArray[np.int16 | np.float32] | None - """The audio data.""" - - type: Literal["voice_stream_event_audio"] = "voice_stream_event_audio" - """The type of event.""" - - -@dataclass -class VoiceStreamEventLifecycle: - """Streaming event from the VoicePipeline""" - - event: Literal["turn_started", "turn_ended", "session_ended"] - """The event that occurred.""" - - type: Literal["voice_stream_event_lifecycle"] = "voice_stream_event_lifecycle" - """The type of event.""" - - -@dataclass -class VoiceStreamEventError: - """Streaming event from the VoicePipeline""" - - error: Exception - """The error that occurred.""" - - type: Literal["voice_stream_event_error"] = "voice_stream_event_error" - """The type of event.""" - - -VoiceStreamEvent: TypeAlias = Union[ - VoiceStreamEventAudio, VoiceStreamEventLifecycle, VoiceStreamEventError -] -"""An event from the `VoicePipeline`, streamed via `StreamedAudioResult.stream()`.""" diff --git a/src/agents/voice/exceptions.py b/src/agents/voice/exceptions.py deleted file mode 100644 index 97dccac8..00000000 --- a/src/agents/voice/exceptions.py +++ /dev/null @@ -1,8 +0,0 @@ -from ..exceptions import AgentsException - - -class STTWebsocketConnectionError(AgentsException): - """Exception raised when the STT websocket connection fails.""" - - def __init__(self, message: str): - self.message = message diff --git a/src/agents/voice/imports.py b/src/agents/voice/imports.py deleted file mode 100644 index b1c09508..00000000 --- a/src/agents/voice/imports.py +++ /dev/null @@ -1,11 +0,0 @@ -try: - import numpy as np - import numpy.typing as npt - import websockets -except ImportError as _e: - raise ImportError( - "`numpy` + `websockets` are required to use voice. You can install them via the optional " - "dependency group: `pip install 'openai-agents[voice]'`." 
- ) from _e - -__all__ = ["np", "npt", "websockets"] diff --git a/src/agents/voice/input.py b/src/agents/voice/input.py deleted file mode 100644 index 8613d27a..00000000 --- a/src/agents/voice/input.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import annotations - -import asyncio -import base64 -import io -import wave -from dataclasses import dataclass - -from ..exceptions import UserError -from .imports import np, npt - -DEFAULT_SAMPLE_RATE = 24000 - - -def _buffer_to_audio_file( - buffer: npt.NDArray[np.int16 | np.float32], - frame_rate: int = DEFAULT_SAMPLE_RATE, - sample_width: int = 2, - channels: int = 1, -) -> tuple[str, io.BytesIO, str]: - if buffer.dtype == np.float32: - # convert to int16 - buffer = np.clip(buffer, -1.0, 1.0) - buffer = (buffer * 32767).astype(np.int16) - elif buffer.dtype != np.int16: - raise UserError("Buffer must be a numpy array of int16 or float32") - - audio_file = io.BytesIO() - with wave.open(audio_file, "w") as wav_file: - wav_file.setnchannels(channels) - wav_file.setsampwidth(sample_width) - wav_file.setframerate(frame_rate) - wav_file.writeframes(buffer.tobytes()) - audio_file.seek(0) - - # (filename, bytes, content_type) - return ("audio.wav", audio_file, "audio/wav") - - -@dataclass -class AudioInput: - """Static audio to be used as input for the VoicePipeline.""" - - buffer: npt.NDArray[np.int16 | np.float32] - """ - A buffer containing the audio data for the agent. Must be a numpy array of int16 or float32. - """ - - frame_rate: int = DEFAULT_SAMPLE_RATE - """The sample rate of the audio data. Defaults to 24000.""" - - sample_width: int = 2 - """The sample width of the audio data. Defaults to 2.""" - - channels: int = 1 - """The number of channels in the audio data. Defaults to 1.""" - - def to_audio_file(self) -> tuple[str, io.BytesIO, str]: - """Returns a tuple of (filename, bytes, content_type)""" - return _buffer_to_audio_file(self.buffer, self.frame_rate, self.sample_width, self.channels) - - def to_base64(self) -> str: - """Returns the audio data as a base64 encoded string.""" - if self.buffer.dtype == np.float32: - # convert to int16 - self.buffer = np.clip(self.buffer, -1.0, 1.0) - self.buffer = (self.buffer * 32767).astype(np.int16) - elif self.buffer.dtype != np.int16: - raise UserError("Buffer must be a numpy array of int16 or float32") - - return base64.b64encode(self.buffer.tobytes()).decode("utf-8") - - -class StreamedAudioInput: - """Audio input represented as a stream of audio data. You can pass this to the `VoicePipeline` - and then push audio data into the queue using the `add_audio` method. - """ - - def __init__(self): - self.queue: asyncio.Queue[npt.NDArray[np.int16 | np.float32]] = asyncio.Queue() - - async def add_audio(self, audio: npt.NDArray[np.int16 | np.float32]): - """Adds more audio data to the stream. - - Args: - audio: The audio data to add. Must be a numpy array of int16 or float32. - """ - await self.queue.put(audio) diff --git a/src/agents/voice/model.py b/src/agents/voice/model.py deleted file mode 100644 index 220d4b48..00000000 --- a/src/agents/voice/model.py +++ /dev/null @@ -1,193 +0,0 @@ -from __future__ import annotations - -import abc -from collections.abc import AsyncIterator -from dataclasses import dataclass -from typing import Any, Callable, Literal - -from .imports import np, npt -from .input import AudioInput, StreamedAudioInput -from .utils import get_sentence_based_splitter - -DEFAULT_TTS_INSTRUCTIONS = ( - "You will receive partial sentences. 
Do not complete the sentence, just read out the text." -) -DEFAULT_TTS_BUFFER_SIZE = 120 - - -@dataclass -class TTSModelSettings: - """Settings for a TTS model.""" - - voice: ( - Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"] | None - ) = None - """ - The voice to use for the TTS model. If not provided, the default voice for the respective model - will be used. - """ - - buffer_size: int = 120 - """The minimal size of the chunks of audio data that are being streamed out.""" - - dtype: npt.DTypeLike = np.int16 - """The data type for the audio data to be returned in.""" - - transform_data: ( - Callable[[npt.NDArray[np.int16 | np.float32]], npt.NDArray[np.int16 | np.float32]] | None - ) = None - """ - A function to transform the data from the TTS model. This is useful if you want the resulting - audio stream to have the data in a specific shape already. - """ - - instructions: str = ( - "You will receive partial sentences. Do not complete the sentence just read out the text." - ) - """ - The instructions to use for the TTS model. This is useful if you want to control the tone of the - audio output. - """ - - text_splitter: Callable[[str], tuple[str, str]] = get_sentence_based_splitter() - """ - A function to split the text into chunks. This is useful if you want to split the text into - chunks before sending it to the TTS model rather than waiting for the whole text to be - processed. - """ - - speed: float | None = None - """The speed with which the TTS model will read the text. Between 0.25 and 4.0.""" - - -class TTSModel(abc.ABC): - """A text-to-speech model that can convert text into audio output.""" - - @property - @abc.abstractmethod - def model_name(self) -> str: - """The name of the TTS model.""" - pass - - @abc.abstractmethod - def run(self, text: str, settings: TTSModelSettings) -> AsyncIterator[bytes]: - """Given a text string, produces a stream of audio bytes, in PCM format. - - Args: - text: The text to convert to audio. - - Returns: - An async iterator of audio bytes, in PCM format. - """ - pass - - -class StreamedTranscriptionSession(abc.ABC): - """A streamed transcription of audio input.""" - - @abc.abstractmethod - def transcribe_turns(self) -> AsyncIterator[str]: - """Yields a stream of text transcriptions. Each transcription is a turn in the conversation. - - This method is expected to return only after `close()` is called. - """ - pass - - @abc.abstractmethod - async def close(self) -> None: - """Closes the session.""" - pass - - -@dataclass -class STTModelSettings: - """Settings for a speech-to-text model.""" - - prompt: str | None = None - """Instructions for the model to follow.""" - - language: str | None = None - """The language of the audio input.""" - - temperature: float | None = None - """The temperature of the model.""" - - turn_detection: dict[str, Any] | None = None - """The turn detection settings for the model when using streamed audio input.""" - - -class STTModel(abc.ABC): - """A speech-to-text model that can convert audio input into text.""" - - @property - @abc.abstractmethod - def model_name(self) -> str: - """The name of the STT model.""" - pass - - @abc.abstractmethod - async def transcribe( - self, - input: AudioInput, - settings: STTModelSettings, - trace_include_sensitive_data: bool, - trace_include_sensitive_audio_data: bool, - ) -> str: - """Given an audio input, produces a text transcription. - - Args: - input: The audio input to transcribe. - settings: The settings to use for the transcription. 
- trace_include_sensitive_data: Whether to include sensitive data in traces. - trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces. - - Returns: - The text transcription of the audio input. - """ - pass - - @abc.abstractmethod - async def create_session( - self, - input: StreamedAudioInput, - settings: STTModelSettings, - trace_include_sensitive_data: bool, - trace_include_sensitive_audio_data: bool, - ) -> StreamedTranscriptionSession: - """Creates a new transcription session, which you can push audio to, and receive a stream - of text transcriptions. - - Args: - input: The audio input to transcribe. - settings: The settings to use for the transcription. - trace_include_sensitive_data: Whether to include sensitive data in traces. - trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces. - - Returns: - A new transcription session. - """ - pass - - -class VoiceModelProvider(abc.ABC): - """The base interface for a voice model provider. - - A model provider is responsible for creating speech-to-text and text-to-speech models, given a - name. - """ - - @abc.abstractmethod - def get_stt_model(self, model_name: str | None) -> STTModel: - """Get a speech-to-text model by name. - - Args: - model_name: The name of the model to get. - - Returns: - The speech-to-text model. - """ - pass - - @abc.abstractmethod - def get_tts_model(self, model_name: str | None) -> TTSModel: - """Get a text-to-speech model by name.""" diff --git a/src/agents/voice/models/__init__.py b/src/agents/voice/models/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/agents/voice/models/openai_model_provider.py b/src/agents/voice/models/openai_model_provider.py deleted file mode 100644 index 094df4cc..00000000 --- a/src/agents/voice/models/openai_model_provider.py +++ /dev/null @@ -1,97 +0,0 @@ -from __future__ import annotations - -import httpx -from openai import AsyncOpenAI, DefaultAsyncHttpxClient - -from ...models import _openai_shared -from ..model import STTModel, TTSModel, VoiceModelProvider -from .openai_stt import OpenAISTTModel -from .openai_tts import OpenAITTSModel - -_http_client: httpx.AsyncClient | None = None - - -# If we create a new httpx client for each request, that would mean no sharing of connection pools, -# which would mean worse latency and resource usage. So, we share the client across requests. -def shared_http_client() -> httpx.AsyncClient: - global _http_client - if _http_client is None: - _http_client = DefaultAsyncHttpxClient() - return _http_client - - -DEFAULT_STT_MODEL = "gpt-4o-transcribe" -DEFAULT_TTS_MODEL = "gpt-4o-mini-tts" - - -class OpenAIVoiceModelProvider(VoiceModelProvider): - """A voice model provider that uses OpenAI models.""" - - def __init__( - self, - *, - api_key: str | None = None, - base_url: str | None = None, - openai_client: AsyncOpenAI | None = None, - organization: str | None = None, - project: str | None = None, - ) -> None: - """Create a new OpenAI voice model provider. - - Args: - api_key: The API key to use for the OpenAI client. If not provided, we will use the - default API key. - base_url: The base URL to use for the OpenAI client. If not provided, we will use the - default base URL. - openai_client: An optional OpenAI client to use. If not provided, we will create a new - OpenAI client using the api_key and base_url. - organization: The organization to use for the OpenAI client. - project: The project to use for the OpenAI client. 
- """ - if openai_client is not None: - assert api_key is None and base_url is None, ( - "Don't provide api_key or base_url if you provide openai_client" - ) - self._client: AsyncOpenAI | None = openai_client - else: - self._client = None - self._stored_api_key = api_key - self._stored_base_url = base_url - self._stored_organization = organization - self._stored_project = project - - # We lazy load the client in case you never actually use OpenAIProvider(). Otherwise - # AsyncOpenAI() raises an error if you don't have an API key set. - def _get_client(self) -> AsyncOpenAI: - if self._client is None: - self._client = _openai_shared.get_default_openai_client() or AsyncOpenAI( - api_key=self._stored_api_key or _openai_shared.get_default_openai_key(), - base_url=self._stored_base_url, - organization=self._stored_organization, - project=self._stored_project, - http_client=shared_http_client(), - ) - - return self._client - - def get_stt_model(self, model_name: str | None) -> STTModel: - """Get a speech-to-text model by name. - - Args: - model_name: The name of the model to get. - - Returns: - The speech-to-text model. - """ - return OpenAISTTModel(model_name or DEFAULT_STT_MODEL, self._get_client()) - - def get_tts_model(self, model_name: str | None) -> TTSModel: - """Get a text-to-speech model by name. - - Args: - model_name: The name of the model to get. - - Returns: - The text-to-speech model. - """ - return OpenAITTSModel(model_name or DEFAULT_TTS_MODEL, self._get_client()) diff --git a/src/agents/voice/models/openai_stt.py b/src/agents/voice/models/openai_stt.py deleted file mode 100644 index 1ae4ea14..00000000 --- a/src/agents/voice/models/openai_stt.py +++ /dev/null @@ -1,456 +0,0 @@ -from __future__ import annotations - -import asyncio -import base64 -import json -import time -from collections.abc import AsyncIterator -from dataclasses import dataclass -from typing import Any, cast - -from openai import AsyncOpenAI - -from ... import _debug -from ...exceptions import AgentsException -from ...logger import logger -from ...tracing import Span, SpanError, TranscriptionSpanData, transcription_span -from ..exceptions import STTWebsocketConnectionError -from ..imports import np, npt, websockets -from ..input import AudioInput, StreamedAudioInput -from ..model import StreamedTranscriptionSession, STTModel, STTModelSettings - -EVENT_INACTIVITY_TIMEOUT = 1000 # Timeout for inactivity in event processing -SESSION_CREATION_TIMEOUT = 10 # Timeout waiting for session.created event -SESSION_UPDATE_TIMEOUT = 10 # Timeout waiting for session.updated event - -DEFAULT_TURN_DETECTION = {"type": "semantic_vad"} - - -@dataclass -class ErrorSentinel: - error: Exception - - -class SessionCompleteSentinel: - pass - - -class WebsocketDoneSentinel: - pass - - -def _audio_to_base64(audio_data: list[npt.NDArray[np.int16 | np.float32]]) -> str: - concatenated_audio = np.concatenate(audio_data) - if concatenated_audio.dtype == np.float32: - # convert to int16 - concatenated_audio = np.clip(concatenated_audio, -1.0, 1.0) - concatenated_audio = (concatenated_audio * 32767).astype(np.int16) - audio_bytes = concatenated_audio.tobytes() - return base64.b64encode(audio_bytes).decode("utf-8") - - -async def _wait_for_event( - event_queue: asyncio.Queue[dict[str, Any]], expected_types: list[str], timeout: float -): - """ - Wait for an event from event_queue whose type is in expected_types within the specified timeout. 
- """ - start_time = time.time() - while True: - remaining = timeout - (time.time() - start_time) - if remaining <= 0: - raise TimeoutError(f"Timeout waiting for event(s): {expected_types}") - evt = await asyncio.wait_for(event_queue.get(), timeout=remaining) - evt_type = evt.get("type", "") - if evt_type in expected_types: - return evt - elif evt_type == "error": - raise Exception(f"Error event: {evt.get('error')}") - - -class OpenAISTTTranscriptionSession(StreamedTranscriptionSession): - """A transcription session for OpenAI's STT model.""" - - def __init__( - self, - input: StreamedAudioInput, - client: AsyncOpenAI, - model: str, - settings: STTModelSettings, - trace_include_sensitive_data: bool, - trace_include_sensitive_audio_data: bool, - ): - self.connected: bool = False - self._client = client - self._model = model - self._settings = settings - self._turn_detection = settings.turn_detection or DEFAULT_TURN_DETECTION - self._trace_include_sensitive_data = trace_include_sensitive_data - self._trace_include_sensitive_audio_data = trace_include_sensitive_audio_data - - self._input_queue: asyncio.Queue[npt.NDArray[np.int16 | np.float32]] = input.queue - self._output_queue: asyncio.Queue[str | ErrorSentinel | SessionCompleteSentinel] = ( - asyncio.Queue() - ) - self._websocket: websockets.ClientConnection | None = None - self._event_queue: asyncio.Queue[dict[str, Any] | WebsocketDoneSentinel] = asyncio.Queue() - self._state_queue: asyncio.Queue[dict[str, Any]] = asyncio.Queue() - self._turn_audio_buffer: list[npt.NDArray[np.int16 | np.float32]] = [] - self._tracing_span: Span[TranscriptionSpanData] | None = None - - # tasks - self._listener_task: asyncio.Task[Any] | None = None - self._process_events_task: asyncio.Task[Any] | None = None - self._stream_audio_task: asyncio.Task[Any] | None = None - self._connection_task: asyncio.Task[Any] | None = None - self._stored_exception: Exception | None = None - - def _start_turn(self) -> None: - self._tracing_span = transcription_span( - model=self._model, - model_config={ - "temperature": self._settings.temperature, - "language": self._settings.language, - "prompt": self._settings.prompt, - "turn_detection": self._turn_detection, - }, - ) - self._tracing_span.start() - - def _end_turn(self, _transcript: str) -> None: - if len(_transcript) < 1: - return - - if self._tracing_span: - if self._trace_include_sensitive_audio_data: - self._tracing_span.span_data.input = _audio_to_base64(self._turn_audio_buffer) - - self._tracing_span.span_data.input_format = "pcm" - - if self._trace_include_sensitive_data: - self._tracing_span.span_data.output = _transcript - - self._tracing_span.finish() - self._turn_audio_buffer = [] - self._tracing_span = None - - async def _event_listener(self) -> None: - assert self._websocket is not None, "Websocket not initialized" - - async for message in self._websocket: - try: - event = json.loads(message) - - if event.get("type") == "error": - raise STTWebsocketConnectionError(f"Error event: {event.get('error')}") - - if event.get("type") in [ - "session.updated", - "transcription_session.updated", - "session.created", - "transcription_session.created", - ]: - await self._state_queue.put(event) - - await self._event_queue.put(event) - except Exception as e: - await self._output_queue.put(ErrorSentinel(e)) - raise STTWebsocketConnectionError("Error parsing events") from e - await self._event_queue.put(WebsocketDoneSentinel()) - - async def _configure_session(self) -> None: - assert self._websocket is not None, "Websocket not 
initialized" - await self._websocket.send( - json.dumps( - { - "type": "transcription_session.update", - "session": { - "input_audio_format": "pcm16", - "input_audio_transcription": {"model": self._model}, - "turn_detection": self._turn_detection, - }, - } - ) - ) - - async def _setup_connection(self, ws: websockets.ClientConnection) -> None: - self._websocket = ws - self._listener_task = asyncio.create_task(self._event_listener()) - - try: - event = await _wait_for_event( - self._state_queue, - ["session.created", "transcription_session.created"], - SESSION_CREATION_TIMEOUT, - ) - except TimeoutError as e: - wrapped_err = STTWebsocketConnectionError( - "Timeout waiting for transcription_session.created event" - ) - await self._output_queue.put(ErrorSentinel(wrapped_err)) - raise wrapped_err from e - except Exception as e: - await self._output_queue.put(ErrorSentinel(e)) - raise e - - await self._configure_session() - - try: - event = await _wait_for_event( - self._state_queue, - ["session.updated", "transcription_session.updated"], - SESSION_UPDATE_TIMEOUT, - ) - if _debug.DONT_LOG_MODEL_DATA: - logger.debug("Session updated") - else: - logger.debug(f"Session updated: {event}") - except TimeoutError as e: - wrapped_err = STTWebsocketConnectionError( - "Timeout waiting for transcription_session.updated event" - ) - await self._output_queue.put(ErrorSentinel(wrapped_err)) - raise wrapped_err from e - except Exception as e: - await self._output_queue.put(ErrorSentinel(e)) - raise - - async def _handle_events(self) -> None: - while True: - try: - event = await asyncio.wait_for( - self._event_queue.get(), timeout=EVENT_INACTIVITY_TIMEOUT - ) - if isinstance(event, WebsocketDoneSentinel): - # processed all events and websocket is done - break - - event_type = event.get("type", "unknown") - if event_type == "conversation.item.input_audio_transcription.completed": - transcript = cast(str, event.get("transcript", "")) - if len(transcript) > 0: - self._end_turn(transcript) - self._start_turn() - await self._output_queue.put(transcript) - await asyncio.sleep(0) # yield control - except asyncio.TimeoutError: - # No new events for a while. Assume the session is done. 
- break - except Exception as e: - await self._output_queue.put(ErrorSentinel(e)) - raise e - await self._output_queue.put(SessionCompleteSentinel()) - - async def _stream_audio( - self, audio_queue: asyncio.Queue[npt.NDArray[np.int16 | np.float32]] - ) -> None: - assert self._websocket is not None, "Websocket not initialized" - self._start_turn() - while True: - buffer = await audio_queue.get() - if buffer is None: - break - - self._turn_audio_buffer.append(buffer) - try: - await self._websocket.send( - json.dumps( - { - "type": "input_audio_buffer.append", - "audio": base64.b64encode(buffer.tobytes()).decode("utf-8"), - } - ) - ) - except websockets.ConnectionClosed: - break - except Exception as e: - await self._output_queue.put(ErrorSentinel(e)) - raise e - - await asyncio.sleep(0) # yield control - - async def _process_websocket_connection(self) -> None: - try: - async with websockets.connect( - "wss://api.openai.com/v1/realtime?intent=transcription", - additional_headers={ - "Authorization": f"Bearer {self._client.api_key}", - "OpenAI-Beta": "realtime=v1", - "OpenAI-Log-Session": "1", - }, - ) as ws: - await self._setup_connection(ws) - self._process_events_task = asyncio.create_task(self._handle_events()) - self._stream_audio_task = asyncio.create_task(self._stream_audio(self._input_queue)) - self.connected = True - if self._listener_task: - await self._listener_task - else: - logger.error("Listener task not initialized") - raise AgentsException("Listener task not initialized") - except Exception as e: - await self._output_queue.put(ErrorSentinel(e)) - raise e - - def _check_errors(self) -> None: - if self._connection_task and self._connection_task.done(): - exc = self._connection_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - if self._process_events_task and self._process_events_task.done(): - exc = self._process_events_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - if self._stream_audio_task and self._stream_audio_task.done(): - exc = self._stream_audio_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - if self._listener_task and self._listener_task.done(): - exc = self._listener_task.exception() - if exc and isinstance(exc, Exception): - self._stored_exception = exc - - def _cleanup_tasks(self) -> None: - if self._listener_task and not self._listener_task.done(): - self._listener_task.cancel() - - if self._process_events_task and not self._process_events_task.done(): - self._process_events_task.cancel() - - if self._stream_audio_task and not self._stream_audio_task.done(): - self._stream_audio_task.cancel() - - if self._connection_task and not self._connection_task.done(): - self._connection_task.cancel() - - async def transcribe_turns(self) -> AsyncIterator[str]: - self._connection_task = asyncio.create_task(self._process_websocket_connection()) - - while True: - try: - turn = await self._output_queue.get() - except asyncio.CancelledError: - break - - if ( - turn is None - or isinstance(turn, ErrorSentinel) - or isinstance(turn, SessionCompleteSentinel) - ): - self._output_queue.task_done() - break - yield turn - self._output_queue.task_done() - - if self._tracing_span: - self._end_turn("") - - if self._websocket: - await self._websocket.close() - - self._check_errors() - if self._stored_exception: - raise self._stored_exception - - async def close(self) -> None: - if self._websocket: - await self._websocket.close() - - self._cleanup_tasks() - - 
-class OpenAISTTModel(STTModel): - """A speech-to-text model for OpenAI.""" - - def __init__( - self, - model: str, - openai_client: AsyncOpenAI, - ): - """Create a new OpenAI speech-to-text model. - - Args: - model: The name of the model to use. - openai_client: The OpenAI client to use. - """ - self.model = model - self._client = openai_client - - @property - def model_name(self) -> str: - return self.model - - def _non_null_or_not_given(self, value: Any) -> Any: - return value if value is not None else None # NOT_GIVEN - - async def transcribe( - self, - input: AudioInput, - settings: STTModelSettings, - trace_include_sensitive_data: bool, - trace_include_sensitive_audio_data: bool, - ) -> str: - """Transcribe an audio input. - - Args: - input: The audio input to transcribe. - settings: The settings to use for the transcription. - - Returns: - The transcribed text. - """ - with transcription_span( - model=self.model, - input=input.to_base64() if trace_include_sensitive_audio_data else "", - input_format="pcm", - model_config={ - "temperature": self._non_null_or_not_given(settings.temperature), - "language": self._non_null_or_not_given(settings.language), - "prompt": self._non_null_or_not_given(settings.prompt), - }, - ) as span: - try: - response = await self._client.audio.transcriptions.create( - model=self.model, - file=input.to_audio_file(), - prompt=self._non_null_or_not_given(settings.prompt), - language=self._non_null_or_not_given(settings.language), - temperature=self._non_null_or_not_given(settings.temperature), - ) - if trace_include_sensitive_data: - span.span_data.output = response.text - return response.text - except Exception as e: - span.span_data.output = "" - span.set_error(SpanError(message=str(e), data={})) - raise e - - async def create_session( - self, - input: StreamedAudioInput, - settings: STTModelSettings, - trace_include_sensitive_data: bool, - trace_include_sensitive_audio_data: bool, - ) -> StreamedTranscriptionSession: - """Create a new transcription session. - - Args: - input: The audio input to transcribe. - settings: The settings to use for the transcription. - trace_include_sensitive_data: Whether to include sensitive data in traces. - trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces. - - Returns: - A new transcription session. - """ - return OpenAISTTTranscriptionSession( - input, - self._client, - self.model, - settings, - trace_include_sensitive_data, - trace_include_sensitive_audio_data, - ) diff --git a/src/agents/voice/models/openai_tts.py b/src/agents/voice/models/openai_tts.py deleted file mode 100644 index 3b7dcf15..00000000 --- a/src/agents/voice/models/openai_tts.py +++ /dev/null @@ -1,54 +0,0 @@ -from collections.abc import AsyncIterator -from typing import Literal - -from openai import AsyncOpenAI - -from ..model import TTSModel, TTSModelSettings - -DEFAULT_VOICE: Literal["ash"] = "ash" - - -class OpenAITTSModel(TTSModel): - """A text-to-speech model for OpenAI.""" - - def __init__( - self, - model: str, - openai_client: AsyncOpenAI, - ): - """Create a new OpenAI text-to-speech model. - - Args: - model: The name of the model to use. - openai_client: The OpenAI client to use. - """ - self.model = model - self._client = openai_client - - @property - def model_name(self) -> str: - return self.model - - async def run(self, text: str, settings: TTSModelSettings) -> AsyncIterator[bytes]: - """Run the text-to-speech model. - - Args: - text: The text to convert to speech. 
- settings: The settings to use for the text-to-speech model. - - Returns: - An iterator of audio chunks. - """ - response = self._client.audio.speech.with_streaming_response.create( - model=self.model, - voice=settings.voice or DEFAULT_VOICE, - input=text, - response_format="pcm", - extra_body={ - "instructions": settings.instructions, - }, - ) - - async with response as stream: - async for chunk in stream.iter_bytes(chunk_size=1024): - yield chunk diff --git a/src/agents/voice/pipeline.py b/src/agents/voice/pipeline.py deleted file mode 100644 index d1dac57c..00000000 --- a/src/agents/voice/pipeline.py +++ /dev/null @@ -1,151 +0,0 @@ -from __future__ import annotations - -import asyncio - -from .._run_impl import TraceCtxManager -from ..exceptions import UserError -from ..logger import logger -from .input import AudioInput, StreamedAudioInput -from .model import STTModel, TTSModel -from .pipeline_config import VoicePipelineConfig -from .result import StreamedAudioResult -from .workflow import VoiceWorkflowBase - - -class VoicePipeline: - """An opinionated voice agent pipeline. It works in three steps: - 1. Transcribe audio input into text. - 2. Run the provided `workflow`, which produces a sequence of text responses. - 3. Convert the text responses into streaming audio output. - """ - - def __init__( - self, - *, - workflow: VoiceWorkflowBase, - stt_model: STTModel | str | None = None, - tts_model: TTSModel | str | None = None, - config: VoicePipelineConfig | None = None, - ): - """Create a new voice pipeline. - - Args: - workflow: The workflow to run. See `VoiceWorkflowBase`. - stt_model: The speech-to-text model to use. If not provided, a default OpenAI - model will be used. - tts_model: The text-to-speech model to use. If not provided, a default OpenAI - model will be used. - config: The pipeline configuration. If not provided, a default configuration will be - used. - """ - self.workflow = workflow - self.stt_model = stt_model if isinstance(stt_model, STTModel) else None - self.tts_model = tts_model if isinstance(tts_model, TTSModel) else None - self._stt_model_name = stt_model if isinstance(stt_model, str) else None - self._tts_model_name = tts_model if isinstance(tts_model, str) else None - self.config = config or VoicePipelineConfig() - - async def run(self, audio_input: AudioInput | StreamedAudioInput) -> StreamedAudioResult: - """Run the voice pipeline. - - Args: - audio_input: The audio input to process. This can either be an `AudioInput` instance, - which is a single static buffer, or a `StreamedAudioInput` instance, which is a - stream of audio data that you can append to. - - Returns: - A `StreamedAudioResult` instance. You can use this object to stream audio events and - play them out. 
- """ - if isinstance(audio_input, AudioInput): - return await self._run_single_turn(audio_input) - elif isinstance(audio_input, StreamedAudioInput): - return await self._run_multi_turn(audio_input) - else: - raise UserError(f"Unsupported audio input type: {type(audio_input)}") - - def _get_tts_model(self) -> TTSModel: - if not self.tts_model: - self.tts_model = self.config.model_provider.get_tts_model(self._tts_model_name) - return self.tts_model - - def _get_stt_model(self) -> STTModel: - if not self.stt_model: - self.stt_model = self.config.model_provider.get_stt_model(self._stt_model_name) - return self.stt_model - - async def _process_audio_input(self, audio_input: AudioInput) -> str: - model = self._get_stt_model() - return await model.transcribe( - audio_input, - self.config.stt_settings, - self.config.trace_include_sensitive_data, - self.config.trace_include_sensitive_audio_data, - ) - - async def _run_single_turn(self, audio_input: AudioInput) -> StreamedAudioResult: - # Since this is single turn, we can use the TraceCtxManager to manage starting/ending the - # trace - with TraceCtxManager( - workflow_name=self.config.workflow_name or "Voice Agent", - trace_id=None, # Automatically generated - group_id=self.config.group_id, - metadata=self.config.trace_metadata, - disabled=self.config.tracing_disabled, - ): - input_text = await self._process_audio_input(audio_input) - - output = StreamedAudioResult( - self._get_tts_model(), self.config.tts_settings, self.config - ) - - async def stream_events(): - try: - async for text_event in self.workflow.run(input_text): - await output._add_text(text_event) - await output._turn_done() - await output._done() - except Exception as e: - logger.error(f"Error processing single turn: {e}") - await output._add_error(e) - raise e - - output._set_task(asyncio.create_task(stream_events())) - return output - - async def _run_multi_turn(self, audio_input: StreamedAudioInput) -> StreamedAudioResult: - with TraceCtxManager( - workflow_name=self.config.workflow_name or "Voice Agent", - trace_id=None, - group_id=self.config.group_id, - metadata=self.config.trace_metadata, - disabled=self.config.tracing_disabled, - ): - output = StreamedAudioResult( - self._get_tts_model(), self.config.tts_settings, self.config - ) - - transcription_session = await self._get_stt_model().create_session( - audio_input, - self.config.stt_settings, - self.config.trace_include_sensitive_data, - self.config.trace_include_sensitive_audio_data, - ) - - async def process_turns(): - try: - async for input_text in transcription_session.transcribe_turns(): - result = self.workflow.run(input_text) - async for text_event in result: - await output._add_text(text_event) - await output._turn_done() - except Exception as e: - logger.error(f"Error processing turns: {e}") - await output._add_error(e) - raise e - finally: - await transcription_session.close() - await output._done() - - output._set_task(asyncio.create_task(process_turns())) - return output diff --git a/src/agents/voice/pipeline_config.py b/src/agents/voice/pipeline_config.py deleted file mode 100644 index a4871612..00000000 --- a/src/agents/voice/pipeline_config.py +++ /dev/null @@ -1,46 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass, field -from typing import Any - -from ..tracing.util import gen_group_id -from .model import STTModelSettings, TTSModelSettings, VoiceModelProvider -from .models.openai_model_provider import OpenAIVoiceModelProvider - - -@dataclass -class VoicePipelineConfig: - 
"""Configuration for a `VoicePipeline`.""" - - model_provider: VoiceModelProvider = field(default_factory=OpenAIVoiceModelProvider) - """The voice model provider to use for the pipeline. Defaults to OpenAI.""" - - tracing_disabled: bool = False - """Whether to disable tracing of the pipeline. Defaults to `False`.""" - - trace_include_sensitive_data: bool = True - """Whether to include sensitive data in traces. Defaults to `True`. This is specifically for the - voice pipeline, and not for anything that goes on inside your Workflow.""" - - trace_include_sensitive_audio_data: bool = True - """Whether to include audio data in traces. Defaults to `True`.""" - - workflow_name: str = "Voice Agent" - """The name of the workflow to use for tracing. Defaults to `Voice Agent`.""" - - group_id: str = field(default_factory=gen_group_id) - """ - A grouping identifier to use for tracing, to link multiple traces from the same conversation - or process. If not provided, we will create a random group ID. - """ - - trace_metadata: dict[str, Any] | None = None - """ - An optional dictionary of additional metadata to include with the trace. - """ - - stt_settings: STTModelSettings = field(default_factory=STTModelSettings) - """The settings to use for the STT model.""" - - tts_settings: TTSModelSettings = field(default_factory=TTSModelSettings) - """The settings to use for the TTS model.""" diff --git a/src/agents/voice/result.py b/src/agents/voice/result.py deleted file mode 100644 index fea79902..00000000 --- a/src/agents/voice/result.py +++ /dev/null @@ -1,287 +0,0 @@ -from __future__ import annotations - -import asyncio -import base64 -from collections.abc import AsyncIterator -from typing import Any - -from ..exceptions import UserError -from ..logger import logger -from ..tracing import Span, SpeechGroupSpanData, speech_group_span, speech_span -from ..tracing.util import time_iso -from .events import ( - VoiceStreamEvent, - VoiceStreamEventAudio, - VoiceStreamEventError, - VoiceStreamEventLifecycle, -) -from .imports import np, npt -from .model import TTSModel, TTSModelSettings -from .pipeline_config import VoicePipelineConfig - - -def _audio_to_base64(audio_data: list[bytes]) -> str: - joined_audio_data = b"".join(audio_data) - return base64.b64encode(joined_audio_data).decode("utf-8") - - -class StreamedAudioResult: - """The output of a `VoicePipeline`. Streams events and audio data as they're generated.""" - - def __init__( - self, - tts_model: TTSModel, - tts_settings: TTSModelSettings, - voice_pipeline_config: VoicePipelineConfig, - ): - """Create a new `StreamedAudioResult` instance. - - Args: - tts_model: The TTS model to use. - tts_settings: The TTS settings to use. - voice_pipeline_config: The voice pipeline config to use. 
- """ - self.tts_model = tts_model - self.tts_settings = tts_settings - self.total_output_text = "" - self.instructions = tts_settings.instructions - self.text_generation_task: asyncio.Task[Any] | None = None - - self._voice_pipeline_config = voice_pipeline_config - self._text_buffer = "" - self._turn_text_buffer = "" - self._queue: asyncio.Queue[VoiceStreamEvent] = asyncio.Queue() - self._tasks: list[asyncio.Task[Any]] = [] - self._ordered_tasks: list[ - asyncio.Queue[VoiceStreamEvent | None] - ] = [] # New: list to hold local queues for each text segment - self._dispatcher_task: asyncio.Task[Any] | None = ( - None # Task to dispatch audio chunks in order - ) - - self._done_processing = False - self._buffer_size = tts_settings.buffer_size - self._started_processing_turn = False - self._first_byte_received = False - self._generation_start_time: str | None = None - self._completed_session = False - self._stored_exception: BaseException | None = None - self._tracing_span: Span[SpeechGroupSpanData] | None = None - - async def _start_turn(self): - if self._started_processing_turn: - return - - self._tracing_span = speech_group_span() - self._tracing_span.start() - self._started_processing_turn = True - self._first_byte_received = False - self._generation_start_time = time_iso() - await self._queue.put(VoiceStreamEventLifecycle(event="turn_started")) - - def _set_task(self, task: asyncio.Task[Any]): - self.text_generation_task = task - - async def _add_error(self, error: Exception): - await self._queue.put(VoiceStreamEventError(error)) - - def _transform_audio_buffer( - self, buffer: list[bytes], output_dtype: npt.DTypeLike - ) -> npt.NDArray[np.int16 | np.float32]: - np_array = np.frombuffer(b"".join(buffer), dtype=np.int16) - - if output_dtype == np.int16: - return np_array - elif output_dtype == np.float32: - return (np_array.astype(np.float32) / 32767.0).reshape(-1, 1) - else: - raise UserError("Invalid output dtype") - - async def _stream_audio( - self, - text: str, - local_queue: asyncio.Queue[VoiceStreamEvent | None], - finish_turn: bool = False, - ): - with speech_span( - model=self.tts_model.model_name, - input=text if self._voice_pipeline_config.trace_include_sensitive_data else "", - model_config={ - "voice": self.tts_settings.voice, - "instructions": self.instructions, - "speed": self.tts_settings.speed, - }, - output_format="pcm", - parent=self._tracing_span, - ) as tts_span: - try: - first_byte_received = False - buffer: list[bytes] = [] - full_audio_data: list[bytes] = [] - - async for chunk in self.tts_model.run(text, self.tts_settings): - if not first_byte_received: - first_byte_received = True - tts_span.span_data.first_content_at = time_iso() - - if chunk: - buffer.append(chunk) - full_audio_data.append(chunk) - if len(buffer) >= self._buffer_size: - audio_np = self._transform_audio_buffer(buffer, self.tts_settings.dtype) - if self.tts_settings.transform_data: - audio_np = self.tts_settings.transform_data(audio_np) - await local_queue.put( - VoiceStreamEventAudio(data=audio_np) - ) # Use local queue - buffer = [] - if buffer: - audio_np = self._transform_audio_buffer(buffer, self.tts_settings.dtype) - if self.tts_settings.transform_data: - audio_np = self.tts_settings.transform_data(audio_np) - await local_queue.put(VoiceStreamEventAudio(data=audio_np)) # Use local queue - - if self._voice_pipeline_config.trace_include_sensitive_audio_data: - tts_span.span_data.output = _audio_to_base64(full_audio_data) - else: - tts_span.span_data.output = "" - - if finish_turn: - await 
local_queue.put(VoiceStreamEventLifecycle(event="turn_ended")) - else: - await local_queue.put(None) # Signal completion for this segment - except Exception as e: - tts_span.set_error( - { - "message": str(e), - "data": { - "text": text - if self._voice_pipeline_config.trace_include_sensitive_data - else "", - }, - } - ) - logger.error(f"Error streaming audio: {e}") - - # Signal completion for whole session because of error - await local_queue.put(VoiceStreamEventLifecycle(event="session_ended")) - raise e - - async def _add_text(self, text: str): - await self._start_turn() - - self._text_buffer += text - self.total_output_text += text - self._turn_text_buffer += text - - combined_sentences, self._text_buffer = self.tts_settings.text_splitter(self._text_buffer) - - if len(combined_sentences) >= 20: - local_queue: asyncio.Queue[VoiceStreamEvent | None] = asyncio.Queue() - self._ordered_tasks.append(local_queue) - self._tasks.append( - asyncio.create_task(self._stream_audio(combined_sentences, local_queue)) - ) - if self._dispatcher_task is None: - self._dispatcher_task = asyncio.create_task(self._dispatch_audio()) - - async def _turn_done(self): - if self._text_buffer: - local_queue: asyncio.Queue[VoiceStreamEvent | None] = asyncio.Queue() - self._ordered_tasks.append(local_queue) # Append the local queue for the final segment - self._tasks.append( - asyncio.create_task( - self._stream_audio(self._text_buffer, local_queue, finish_turn=True) - ) - ) - self._text_buffer = "" - self._done_processing = True - if self._dispatcher_task is None: - self._dispatcher_task = asyncio.create_task(self._dispatch_audio()) - await asyncio.gather(*self._tasks) - - def _finish_turn(self): - if self._tracing_span: - if self._voice_pipeline_config.trace_include_sensitive_data: - self._tracing_span.span_data.input = self._turn_text_buffer - else: - self._tracing_span.span_data.input = "" - - self._tracing_span.finish() - self._tracing_span = None - self._turn_text_buffer = "" - self._started_processing_turn = False - - async def _done(self): - self._completed_session = True - await self._wait_for_completion() - - async def _dispatch_audio(self): - # Dispatch audio chunks from each segment in the order they were added - while True: - if len(self._ordered_tasks) == 0: - if self._completed_session: - break - await asyncio.sleep(0) - continue - local_queue = self._ordered_tasks.pop(0) - while True: - chunk = await local_queue.get() - if chunk is None: - break - await self._queue.put(chunk) - if isinstance(chunk, VoiceStreamEventLifecycle): - local_queue.task_done() - if chunk.event == "turn_ended": - self._finish_turn() - break - await self._queue.put(VoiceStreamEventLifecycle(event="session_ended")) - - async def _wait_for_completion(self): - tasks: list[asyncio.Task[Any]] = self._tasks - if self._dispatcher_task is not None: - tasks.append(self._dispatcher_task) - await asyncio.gather(*tasks) - - def _cleanup_tasks(self): - self._finish_turn() - - for task in self._tasks: - if not task.done(): - task.cancel() - - if self._dispatcher_task and not self._dispatcher_task.done(): - self._dispatcher_task.cancel() - - if self.text_generation_task and not self.text_generation_task.done(): - self.text_generation_task.cancel() - - def _check_errors(self): - for task in self._tasks: - if task.done(): - if task.exception(): - self._stored_exception = task.exception() - break - - async def stream(self) -> AsyncIterator[VoiceStreamEvent]: - """Stream the events and audio data as they're generated.""" - while True: - try: - 
event = await self._queue.get() - except asyncio.CancelledError: - break - if isinstance(event, VoiceStreamEventError): - self._stored_exception = event.error - logger.error(f"Error processing output: {event.error}") - break - if event is None: - break - yield event - if event.type == "voice_stream_event_lifecycle" and event.event == "session_ended": - break - - self._check_errors() - self._cleanup_tasks() - - if self._stored_exception: - raise self._stored_exception diff --git a/src/agents/voice/utils.py b/src/agents/voice/utils.py deleted file mode 100644 index 1535bd0d..00000000 --- a/src/agents/voice/utils.py +++ /dev/null @@ -1,37 +0,0 @@ -import re -from typing import Callable - - -def get_sentence_based_splitter( - min_sentence_length: int = 20, -) -> Callable[[str], tuple[str, str]]: - """Returns a function that splits text into chunks based on sentence boundaries. - - Args: - min_sentence_length: The minimum length of a sentence to be included in a chunk. - - Returns: - A function that splits text into chunks based on sentence boundaries. - """ - - def sentence_based_text_splitter(text_buffer: str) -> tuple[str, str]: - """ - A function to split the text into chunks. This is useful if you want to split the text into - chunks before sending it to the TTS model rather than waiting for the whole text to be - processed. - - Args: - text_buffer: The text to split. - - Returns: - A tuple of the text to process and the remaining text buffer. - """ - sentences = re.split(r"(?<=[.!?])\s+", text_buffer.strip()) - if len(sentences) >= 1: - combined_sentences = " ".join(sentences[:-1]) - if len(combined_sentences) >= min_sentence_length: - remaining_text_buffer = sentences[-1] - return combined_sentences, remaining_text_buffer - return "", text_buffer - - return sentence_based_text_splitter diff --git a/src/agents/voice/workflow.py b/src/agents/voice/workflow.py deleted file mode 100644 index c706ec41..00000000 --- a/src/agents/voice/workflow.py +++ /dev/null @@ -1,93 +0,0 @@ -from __future__ import annotations - -import abc -from collections.abc import AsyncIterator -from typing import Any - -from ..agent import Agent -from ..items import TResponseInputItem -from ..result import RunResultStreaming -from ..run import Runner - - -class VoiceWorkflowBase(abc.ABC): - """ - A base class for a voice workflow. You must implement the `run` method. A "workflow" is any - code you want, that receives a transcription and yields text that will be turned into speech - by a text-to-speech model. - In most cases, you'll create `Agent`s and use `Runner.run_streamed()` to run them, returning - some or all of the text events from the stream. You can use the `VoiceWorkflowHelper` class to - help with extracting text events from the stream. - If you have a simple workflow that has a single starting agent and no custom logic, you can - use `SingleAgentVoiceWorkflow` directly. - """ - - @abc.abstractmethod - def run(self, transcription: str) -> AsyncIterator[str]: - """ - Run the voice workflow. You will receive an input transcription, and must yield text that - will be spoken to the user. You can run whatever logic you want here. In most cases, the - final logic will involve calling `Runner.run_streamed()` and yielding any text events from - the stream. 
- """ - pass - - -class VoiceWorkflowHelper: - @classmethod - async def stream_text_from(cls, result: RunResultStreaming) -> AsyncIterator[str]: - """Wraps a `RunResultStreaming` object and yields text events from the stream.""" - async for event in result.stream_events(): - if ( - event.type == "raw_response_event" - and event.data.type == "response.output_text.delta" - ): - yield event.data.delta - - -class SingleAgentWorkflowCallbacks: - def on_run(self, workflow: SingleAgentVoiceWorkflow, transcription: str) -> None: - """Called when the workflow is run.""" - pass - - -class SingleAgentVoiceWorkflow(VoiceWorkflowBase): - """A simple voice workflow that runs a single agent. Each transcription and result is added to - the input history. - For more complex workflows (e.g. multiple Runner calls, custom message history, custom logic, - custom configs), subclass `VoiceWorkflowBase` and implement your own logic. - """ - - def __init__(self, agent: Agent[Any], callbacks: SingleAgentWorkflowCallbacks | None = None): - """Create a new single agent voice workflow. - - Args: - agent: The agent to run. - callbacks: Optional callbacks to call during the workflow. - """ - self._input_history: list[TResponseInputItem] = [] - self._current_agent = agent - self._callbacks = callbacks - - async def run(self, transcription: str) -> AsyncIterator[str]: - if self._callbacks: - self._callbacks.on_run(self, transcription) - - # Add the transcription to the input history - self._input_history.append( - { - "role": "user", - "content": transcription, - } - ) - - # Run the agent - result = Runner.run_streamed(self._current_agent, self._input_history) - - # Stream the text from the result - async for chunk in VoiceWorkflowHelper.stream_text_from(result): - yield chunk - - # Update the input history and current agent - self._input_history = result.to_input_list() - self._current_agent = result.last_agent diff --git a/src/agents/extensions/__init__.py b/src/app/__init__.py.py similarity index 100% rename from src/agents/extensions/__init__.py rename to src/app/__init__.py.py diff --git a/src/app/agent_onboarding.py b/src/app/agent_onboarding.py new file mode 100644 index 00000000..18eb7aa3 --- /dev/null +++ b/src/app/agent_onboarding.py @@ -0,0 +1,83 @@ +import sys +import os +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) + +from fastapi import APIRouter, Request +from agents import Agent, Runner +from datetime import datetime +import json +import httpx + +router = APIRouter() + +# Define the onboarding agent +onboarding_agent = Agent( + name="OnboardingAgent", + instructions=""" +You are an onboarding assistant helping new influencers introduce themselves. +Your job is to: +1. Gently ask about their interests, content goals, and style. +2. Summarize their early profile with a few soft fields. +3. Optionally ask a clarifying follow-up question if needed. + +Return your response in the following format: +{ + "output_type": "soft_profile", + "contains_image": false, + "details": { + "interests": ["wellness", "fitness"], + "preferred_style": "authentic, relaxed", + "content_goals": "collaborations, brand storytelling", + "next_question": "Would you consider doing live sessions?" + } +} +Only reply in this format. 
+""" +) + +@router.post("/onboard") +async def onboard_influencer(request: Request): + data = await request.json() + user_input = data.get("input", "") + user_id = data.get("user_id", "anonymous") + webhook_url = data.get("webhook_url") + debug_info = {} + + result = await Runner.run(onboarding_agent, input=user_input) + + try: + parsed_output = json.loads(result.final_output) + output_type = parsed_output.get("output_type") + output_details = parsed_output.get("details") + contains_image = parsed_output.get("contains_image", False) + + if not output_type or not output_details: + raise ValueError("Missing required output keys") + except Exception as e: + parsed_output = None + output_type = "raw_text" + output_details = result.final_output + contains_image = False + debug_info["validation_error"] = str(e) + debug_info["raw_output"] = result.final_output + + session = { + "agent_type": "onboarding", + "user_id": user_id, + "output_type": output_type, + "contains_image": contains_image, + "output_details": output_details, + "created_at": datetime.utcnow().isoformat(), + } + + if debug_info: + session["debug_info"] = debug_info + + if webhook_url: + async with httpx.AsyncClient() as client: + try: + await client.post(webhook_url, json=session) + except Exception as e: + session["webhook_error"] = str(e) + + return session diff --git a/src/agents/agent_output.py b/src/app/agent_output.py similarity index 83% rename from src/agents/agent_output.py rename to src/app/agent_output.py index 3262c57d..32cef797 100644 --- a/src/agents/agent_output.py +++ b/src/app/agent_output.py @@ -5,9 +5,9 @@ from typing_extensions import TypedDict, get_args, get_origin from .exceptions import ModelBehaviorError, UserError -from .strict_schema import ensure_strict_json_schema -from .tracing import SpanError -from .util import _error_tracing, _json +from agents.strict_schema import ensure_strict_json_schema +from agents.tracing import SpanError +from agents.util import _error_tracing, _json _WRAPPER_DICT_KEY = "response" @@ -142,3 +142,30 @@ def _type_to_str(t: type[Any]) -> str: return f"{origin.__name__}[{args_str}]" else: return str(t) + +# ───────────────────────────────────────────────────────────── +# Additional output schemas (Phase α) +# ───────────────────────────────────────────────────────────── +from typing import List, Union +from pydantic import BaseModel + +class ProfileFieldOut(BaseModel): + field_name: str + field_value: str | list[str] | int | bool + clarification_prompt: str | None = None # ← NEW, optional + +class ClarificationOut(BaseModel): + """Prompt asking the user for missing info.""" + prompt: str + +# Ensure __all__ exists, then extend it +try: + __all__ +except NameError: + __all__ = [] + +__all__.extend([ + "ProfileFieldOut", + "ClarificationOut", +]) + diff --git a/src/app/agent_server.py b/src/app/agent_server.py new file mode 100644 index 00000000..312574b6 --- /dev/null +++ b/src/app/agent_server.py @@ -0,0 +1,265 @@ +# agents/agent_server.py — deterministic handoffs via SDK `handoff()` with robust error handling + +from __future__ import annotations +import os +import sys +import json +import httpx +from datetime import datetime +from dotenv import load_dotenv +from fastapi import FastAPI, Request, HTTPException +from fastapi.middleware.cors import CORSMiddleware +from pydantic import BaseModel +from app.profilebuilder_agent import profilebuilder_agent +from app.profilebuilder import router as profilebuilder_router + +from agents.tool import WebSearchTool + +# ── SDK setup 
─────────────────────────────────────────────────────────────── +from agents import Agent, Runner, handoff, RunContextWrapper +from agents.extensions.handoff_prompt import prompt_with_handoff_instructions + +# ── Environment variable for Bubble webhook URL +CHAT_URL = os.getenv("BUBBLE_CHAT_URL") + +# ── send_webhook helper ───────────────────────────────────────────────────── +async def send_webhook(payload: dict): + async with httpx.AsyncClient() as client: + print("=== Webhook Dispatch ===\n", json.dumps(payload, indent=2)) + await client.post(CHAT_URL, json=payload) + print("========================") + +# ── Specialist agents ────────────────────────────────────────────────────── +strategy = Agent( + name="strategy", + instructions="You create 7-day social strategies. Respond ONLY in structured JSON." +) +content = Agent( + name="content", + instructions="You write brand-aligned social posts. Respond ONLY in structured JSON." +) +repurpose = Agent( + name="repurpose", + instructions="You repurpose content. Respond ONLY in structured JSON." +) +feedback = Agent( + name="feedback", + instructions="You critique content. Respond ONLY in structured JSON." +) +profile_analyzer = Agent( + name="profile_analyzer", + instructions=""" +You are an expert in analyzing aspiring influencer profiles. + +Your goal is to deeply understand a user's motivations, niche, audience, and goals based on their collected profile data. Then, generate a highly personalized report that: +- Recognizes their unique strengths and values +- Suggests viable directions based on their niche and goals +- Offers caution or tradeoff considerations +- Is written in clear, supportive, actionable tone + +Use WebSearchTool if needed to briefly validate niche demand or market trends. Output a single MarkdownBlock with the report. Do NOT output JSON or code. Respond only with a single full markdown block. +""", + tools=[WebSearchTool()], +) + +AGENTS = { + "strategy": strategy, + "content": content, + "repurpose": repurpose, + "feedback": feedback, + "profile_analyzer": profile_analyzer, + "profilebuilder": profilebuilder_agent +} + +# ── Pydantic model for Manager handoff payload ──────────────────────────── +class HandoffData(BaseModel): + clarify: str + prompt: str + +# ── Manager agent ────────────────────────────────────────────────────────── +MANAGER_TXT = """ +You are the Manager. When routing, you MUST call exactly one of these tools: + • transfer_to_strategy + • transfer_to_content + • transfer_to_repurpose + • transfer_to_feedback + +Each call must pass a JSON object matching this schema (HandoffData): +{ + "clarify": "", + "prompt": "" +} + +Do NOT output any other JSON or wrap in Markdown. The SDK will handle the rest. 
+""" + +async def on_handoff(ctx: RunContextWrapper[HandoffData], input_data: HandoffData): + # Send manager clarification webhook + task_id = ctx.context['task_id'] + user_id = ctx.context['user_id'] + payload = build_payload( + task_id=task_id, + user_id=user_id, + agent_type="manager", + message={'type':'text','content': input_data.clarify}, + reason='handoff', + trace=ctx.usage.to_debug_dict() if hasattr(ctx.usage, 'to_debug_dict') else [] + ) + await send_webhook(flatten_payload(payload)) + +manager = Agent( + name="manager", + instructions=prompt_with_handoff_instructions(MANAGER_TXT), + handoffs=[ + handoff(agent=strategy, on_handoff=on_handoff, input_type=HandoffData), + handoff(agent=content, on_handoff=on_handoff, input_type=HandoffData), + handoff(agent=repurpose, on_handoff=on_handoff, input_type=HandoffData), + handoff(agent=feedback, on_handoff=on_handoff, input_type=HandoffData), + ] +) + +ALL_AGENTS = {"manager": manager, **AGENTS} + +# ── Payload builders ───────────────────────────────────────────────────────── +def build_payload(task_id, user_id, agent_type, message, reason, trace): + return { + "task_id": task_id, + "user_id": user_id, + "agent_type": agent_type, + "message": {"type": message.get("type"), "content": message.get("content")}, + "metadata": {"reason": reason}, + "trace": trace, + "created_at": datetime.utcnow().isoformat(), + } + +def flatten_payload(p: dict) -> dict: + """ + Flatten one level of nested message/metadata for Bubble. + """ + return { + "task_id": p["task_id"], + "user_id": p["user_id"], + "agent_type": p["agent_type"], + "message_type": p["message"]["type"], + "message_content": p["message"]["content"], + "metadata_reason": p["metadata"].get("reason", ""), + "created_at": p["created_at"], + } + +# ── FastAPI app ──────────────────────────────────────────────────────────── +app = FastAPI() +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], allow_credentials=True, + allow_methods=["*"], allow_headers=["*"], +) +app.include_router(profilebuilder_router) + +@app.post("/agent") +async def run_agent(req: Request): + data = await req.json() + # normalize prompt + prompt = ( + data.get("prompt") or data.get("user_prompt") or data.get("message") + ) + if not prompt: + raise HTTPException(422, "Missing 'prompt' field") + + # mandatory IDs + task_id = data.get("task_id") + user_id = data.get("user_id") + if not task_id or not user_id: + raise HTTPException(422, "Missing 'task_id' or 'user_id'") + + # 1) Run Manager with error catch for handoff parsing issues + try: + result = await Runner.run( + manager, + input=prompt, + context={"task_id": task_id, "user_id": user_id}, + max_turns=12, + ) + except json.JSONDecodeError as e: + # Handoff JSON malformed: send fallback clarification + fallback = build_payload( + task_id=task_id, + user_id=user_id, + agent_type="manager", + message={"type":"text","content": + "Sorry, I couldn’t process your request—could you rephrase?"}, + reason="handoff_parse_error", + trace=[] + ) + await send_webhook(flatten_payload(fallback)) + return {"ok": True} + + # 2) Final output comes from the last agent in the chain + raw = result.final_output.strip() + print(f"Raw LLM output: {raw}") + try: + json.loads(raw) + reason = "Agent returned structured JSON" + except Exception: + reason = "Agent returned unstructured output" + trace = result.to_debug_dict() if hasattr(result, 'to_debug_dict') else [] + + # 3) Send the final specialist webhook + final_type = (result.agent.name + if hasattr(result, "agent") and 
result.agent + else "manager") + out_payload = build_payload( + task_id=task_id, + user_id=user_id, + agent_type=final_type, + message={"type":"text","content": raw}, + reason=reason, + trace=trace + ) + await send_webhook(flatten_payload(out_payload)) + + return {"ok": True} + +@app.post("/agent_direct") +async def run_agent_direct(req: Request): + data = await req.json() + + agent_type = data.get("agent_type") + agent = AGENTS.get(agent_type) + if not agent: + raise HTTPException(422, f"Unknown agent_type: {agent_type}") + + task_id = data.get("task_id") + user_id = data.get("user_id") + if not task_id or not user_id: + raise HTTPException(422, "Missing 'task_id' or 'user_id'") + + prompt = data.get("prompt") or data.get("message") or "" + context = { + "task_id": task_id, + "user_id": user_id, + "profile_data": data.get("profile_data") + } + + result = await Runner.run(agent, input=prompt, context=context, max_turns=12) + raw = result.final_output.strip() + + try: + content = json.loads(raw) + reason = "Agent returned structured JSON" + msg = {"type": "structured", "content": content} + except json.JSONDecodeError: + reason = "Agent returned unstructured output" + msg = {"type": "text", "content": raw} + + trace = result.to_debug_dict() if hasattr(result, "to_debug_dict") else [] + + payload = build_payload( + task_id=task_id, + user_id=user_id, + agent_type=agent.name, + message=msg, + reason=reason, + trace=trace + ) + await send_webhook(flatten_payload(payload)) + return {"ok": True} diff --git a/src/app/exceptions.py b/src/app/exceptions.py new file mode 100644 index 00000000..2020ea2d --- /dev/null +++ b/src/app/exceptions.py @@ -0,0 +1,8 @@ +""" +Thin re-export layer so the rest of our code can just do + from .exceptions import ModelBehaviorError, UserError +without depending on the SDK’s namespace directly. 
+""" +from agents.exceptions import ModelBehaviorError, UserError # type: ignore + +__all__ = ["ModelBehaviorError", "UserError"] diff --git a/src/app/profilebuilder.py b/src/app/profilebuilder.py new file mode 100644 index 00000000..e156317c --- /dev/null +++ b/src/app/profilebuilder.py @@ -0,0 +1,30 @@ +# src/app/profilebuilder.py +from fastapi import APIRouter, Request, HTTPException +from datetime import datetime +from app.storage import get_storage +from agents.run import Runner +from app.profilebuilder_agent import profilebuilder_agent + +router = APIRouter() +storage = get_storage() + +@router.post("/profilebuilder") +async def profilebuilder_handler(req: Request): + data = await req.json() + t, u, p = data.get("task_id"), data.get("user_id"), data.get("prompt") + if not (t and u and p): + raise HTTPException(422, "Missing task_id, user_id, or prompt") + + # 1) Get the agent’s output + result = await Runner.run(profilebuilder_agent, p) + out = result.final_output + ts = datetime.utcnow().isoformat() + + # 2) Save profile field (calls Bubble webhook or Supabase upsert) + await storage.save_profile_field(t, u, out.field_name, out.field_value, ts) + + # 3) Send follow-up chat if needed + if out.clarification_prompt: + await storage.send_chat_message(t, u, out.clarification_prompt, ts) + + return {"ok": True} diff --git a/src/app/profilebuilder_agent.py b/src/app/profilebuilder_agent.py new file mode 100644 index 00000000..c2a0ff57 --- /dev/null +++ b/src/app/profilebuilder_agent.py @@ -0,0 +1,26 @@ +# src/app/profilebuilder_agent.py +# ------------------------------- + +from agents import Agent, output_guardrail, GuardrailFunctionOutput # ← single import line + +from .agent_output import ProfileFieldOut + + +profilebuilder_agent = Agent( # exported under this name + name="Profile-builder", + instructions=( + "Collect ONE profile field at a time from the user.\n" + "After each answer, respond only with valid JSON matching the ProfileFieldOut schema above.\n" + "Use the field clarification_prompt to hold the **next question you want to ask the user** (or null if done with this turn)." + ), + output_type=ProfileFieldOut, +) + + +@output_guardrail +async def schema_guardrail(ctx, agent, llm_output): + # If the JSON parsed into ProfileFieldOut we’re good. + return GuardrailFunctionOutput("schema_ok", tripwire_triggered=False) + + +profilebuilder_agent.output_guardrails = [schema_guardrail] diff --git a/src/app/storage.py b/src/app/storage.py new file mode 100644 index 00000000..ccb3b0f9 --- /dev/null +++ b/src/app/storage.py @@ -0,0 +1,88 @@ +# src/app/storage.py +import os +from abc import ABC, abstractmethod + +# ENV var to pick backend: "bubble" (default) or "supabase" +STORAGE_BACKEND = os.getenv("STORAGE_BACKEND", "bubble").lower() + +class StorageBackend(ABC): + @abstractmethod + async def save_profile_field(self, task_id, user_id, field_name, field_value, created_at): + ... + + @abstractmethod + async def send_chat_message(self, task_id, user_id, message, created_at): + ... 
+
+# will be replaced below
+_storage: StorageBackend
+
+# src/app/storage.py (continued, bubble section)
+from app.util.webhook import send_webhook
+
+class BubbleStorage(StorageBackend):
+    def __init__(self):
+        self.profile_url = os.getenv("PROFILE_WEBHOOK_URL")
+        self.chat_url = os.getenv("CLARIFICATION_WEBHOOK_URL")
+
+    async def save_profile_field(self, task_id, user_id, field_name, field_value, created_at):
+        payload = {
+            "task_id": task_id,
+            "user_id": user_id,
+            "agent_type": "profilebuilder",
+            "message_type": "profile_partial",
+            "message_content": {field_name: field_value},
+            "created_at": created_at,
+        }
+        await send_webhook(self.profile_url, payload)
+
+    async def send_chat_message(self, task_id, user_id, message, created_at):
+        payload = {
+            "task_id": task_id,
+            "user_id": user_id,
+            "agent_type": "profilebuilder",
+            "message_type": "text",
+            "message_content": message,
+            "created_at": created_at,
+        }
+        await send_webhook(self.chat_url, payload)
+
+# src/app/storage.py (continued, supabase section)
+from datetime import datetime
+from supabase import create_client
+
+class SupabaseStorage(StorageBackend):
+    def __init__(self):
+        url = os.getenv("SUPABASE_URL")
+        key = os.getenv("SUPABASE_SERVICE_KEY")
+        self.sb = create_client(url, key)
+
+    async def save_profile_field(self, task_id, user_id, field_name, field_value, created_at):
+        # Build base payload
+        payload = {
+            "task_id": task_id,
+            "user_id": user_id,
+            "updated_at": created_at,
+            field_name: field_value
+        }
+        # Upsert into profiles table
+        await self.sb.table("profiles").upsert(payload).execute()
+
+    async def send_chat_message(self, task_id, user_id, message, created_at):
+        # (optional) if you want to log chat messages
+        await self.sb.table("chat_messages").insert({
+            "task_id": task_id,
+            "user_id": user_id,
+            "content": message,
+            "created_at": created_at,
+        }).execute()
+
+
+# src/app/storage.py (continued)
+if STORAGE_BACKEND == "supabase":
+    _storage = SupabaseStorage()
+else:
+    _storage = BubbleStorage()
+
+# export the single instance
+get_storage = lambda: _storage
diff --git a/src/agents/models/__init__.py b/src/app/util/__init__.py
similarity index 100%
rename from src/agents/models/__init__.py
rename to src/app/util/__init__.py
diff --git a/src/app/util/webhook.py b/src/app/util/webhook.py
new file mode 100644
index 00000000..72e36954
--- /dev/null
+++ b/src/app/util/webhook.py
@@ -0,0 +1,51 @@
+"""app/util/webhook.py
+A single, reusable helper for posting JSON payloads to Bubble-workflow URLs.
+
+Usage in your FastAPI code:
+
+    from app.util.webhook import send_webhook
+
+    url = TASK_URL_MAP[task_type]  # looked up from env-vars
+    await send_webhook(url, flattened_payload)
+
+You keep *all* Bubble-specific routing logic (task_type → URL) in your
+FastAPI service, while this helper focuses solely on safe, idempotent
+HTTP posting and basic allow-list protection.
+"""
+from __future__ import annotations
+
+import os
+import json
+import httpx
+from typing import Any, Mapping
+
+# -----------------------------------------------------------------------------
+# Configuration
+# -----------------------------------------------------------------------------
+# Only allow POSTs to URLs that start with one of these roots (prevents exfiltration)
+ALLOWED_ROOTS = os.getenv("BUBBLE_DOMAIN_ROOTS", "https://rgtnow.com").split(",")
+
+# Optional default timeout (seconds) for outbound webhook calls.
+HTTP_TIMEOUT = float(os.getenv("WEBHOOK_TIMEOUT", "10")) + +# ----------------------------------------------------------------------------- +# Public helper +# ----------------------------------------------------------------------------- +async def send_webhook(target_url: str, payload: Mapping[str, Any]) -> None: + """POST *payload* as JSON to *target_url*. + + Raises: + ValueError: if *target_url* is outside the allowed Bubble domain roots. + httpx.HTTPStatusError: if Bubble responds with an error status code. + """ + if not any(target_url.startswith(root.strip()) for root in ALLOWED_ROOTS): + raise ValueError( + f"Refusing to POST to {target_url!r} — must begin with one of {ALLOWED_ROOTS!r}" + ) + + async with httpx.AsyncClient(timeout=HTTP_TIMEOUT) as client: + print("=== Webhook Dispatch →", target_url, "===\n", + json.dumps(payload, indent=2, default=str)) + resp = await client.post(target_url, json=payload) + resp.raise_for_status() + return None diff --git a/src/schemas.py b/src/schemas.py new file mode 100644 index 00000000..c26c7ecb --- /dev/null +++ b/src/schemas.py @@ -0,0 +1,17 @@ +from typing import Literal, Optional, Dict, Union +from pydantic import BaseModel, Field + +class NewTask(BaseModel): + action: Literal["new_task"] + task_type: str + user_prompt: str + params: Dict = Field(default_factory=dict) + first_agent: Optional[str] = "auto" + +class NewMessage(BaseModel): + action: Literal["new_message"] + task_id: str + message: str + agent_session_id: Optional[str] = None + +Inbound = Union[NewTask, NewMessage] diff --git a/vendor/openai-agents-python b/vendor/openai-agents-python new file mode 160000 index 00000000..f9763495 --- /dev/null +++ b/vendor/openai-agents-python @@ -0,0 +1 @@ +Subproject commit f9763495b86afcf0c421451a92200e1141fa8dcb
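For reference, a minimal sketch (not part of the diff above) of how the build_payload/flatten_payload helpers in src/app/agent_server.py shape the body that Bubble receives. The IDs and message text are placeholders, and the import assumes the server module's own dependencies (agents SDK, dotenv, httpx, supabase) resolve in your environment.

from app.agent_server import build_payload, flatten_payload

# Placeholder IDs and content, purely illustrative.
payload = build_payload(
    task_id="task_123",
    user_id="user_456",
    agent_type="strategy",
    message={"type": "text", "content": "Here is your 7-day plan..."},
    reason="Agent returned unstructured output",
    trace=[],
)

flat = flatten_payload(payload)
# flat collapses the nested message/metadata into single keys, e.g.:
# {
#   "task_id": "task_123",
#   "user_id": "user_456",
#   "agent_type": "strategy",
#   "message_type": "text",
#   "message_content": "Here is your 7-day plan...",
#   "metadata_reason": "Agent returned unstructured output",
#   "created_at": "...",  # ISO-8601 timestamp set by build_payload
# }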
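In the same spirit, a sketch of one turn of the Profile-builder agent's output and how it validates against ProfileFieldOut in src/app/agent_output.py. This assumes pydantic v2 (model_validate_json) and that src/app is importable as the app package; the field values are invented.

import json
from app.agent_output import ProfileFieldOut

# A made-up example of a single turn's structured output.
raw = json.dumps({
    "field_name": "interests",
    "field_value": ["wellness", "fitness"],
    "clarification_prompt": "What kind of content do you most enjoy making?",
})

field = ProfileFieldOut.model_validate_json(raw)  # pydantic v2 API
print(field.field_name, field.field_value)
# profilebuilder.py then stores the field via the storage backend and, because
# clarification_prompt is set, sends it back to the user as the next question.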
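Lastly, a sketch of how the Inbound union in src/schemas.py could parse an incoming request body. It assumes pydantic v2's TypeAdapter and that src/ is on the import path; the payloads are illustrative, and nothing else in this diff appears to reference these models yet.

from pydantic import TypeAdapter
from schemas import Inbound, NewTask, NewMessage

adapter = TypeAdapter(Inbound)  # pydantic v2

# The "action" literal decides which model matches.
task = adapter.validate_python({
    "action": "new_task",
    "task_type": "strategy",
    "user_prompt": "Plan my first week of posts.",
})
assert isinstance(task, NewTask)

msg = adapter.validate_python({
    "action": "new_message",
    "task_id": "task_123",
    "message": "Can you make day 3 a reel instead?",
})
assert isinstance(msg, NewMessage)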